author     Jens Axboe <jens.axboe@oracle.com>  2009-12-03 07:49:39 -0500
committer  Jens Axboe <jens.axboe@oracle.com>  2009-12-03 07:49:39 -0500
commit     220d0b1dbf78c6417a658c96e571415552d3abac (patch)
tree       70cd3862540c38ea490e7a27c3c7acc35b680234 /fs
parent     474b18ccc264c472abeec50f48469b6477202699 (diff)
parent     22763c5cf3690a681551162c15d34d935308c8d7 (diff)
Merge branch 'master' into for-2.6.33
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/cache.c  14
-rw-r--r--  fs/9p/vfs_dir.c  93
-rw-r--r--  fs/9p/vfs_inode.c  5
-rw-r--r--  fs/Kconfig  2
-rw-r--r--  fs/afs/file.c  15
-rw-r--r--  fs/btrfs/extent-tree.c  113
-rw-r--r--  fs/btrfs/extent_map.c  2
-rw-r--r--  fs/btrfs/free-space-cache.c  2
-rw-r--r--  fs/btrfs/inode.c  95
-rw-r--r--  fs/btrfs/root-tree.c  2
-rw-r--r--  fs/btrfs/transaction.c  19
-rw-r--r--  fs/cachefiles/interface.c  32
-rw-r--r--  fs/cachefiles/namei.c  187
-rw-r--r--  fs/cachefiles/rdwr.c  130
-rw-r--r--  fs/cifs/CHANGES  9
-rw-r--r--  fs/cifs/cifsfs.c  2
-rw-r--r--  fs/cifs/cifsproto.h  1
-rw-r--r--  fs/cifs/connect.c  8
-rw-r--r--  fs/cifs/dir.c  8
-rw-r--r--  fs/cifs/inode.c  7
-rw-r--r--  fs/cifs/misc.c  14
-rw-r--r--  fs/cifs/readdir.c  7
-rw-r--r--  fs/compat.c  2
-rw-r--r--  fs/compat_ioctl.c  4
-rw-r--r--  fs/exec.c  8
-rw-r--r--  fs/ext3/fsync.c  36
-rw-r--r--  fs/ext3/inode.c  36
-rw-r--r--  fs/ext3/super.c  2
-rw-r--r--  fs/ext4/ext4.h  2
-rw-r--r--  fs/ext4/extents.c  36
-rw-r--r--  fs/ext4/inode.c  24
-rw-r--r--  fs/ext4/namei.c  16
-rw-r--r--  fs/ext4/super.c  20
-rw-r--r--  fs/fcntl.c  4
-rw-r--r--  fs/fscache/Kconfig  7
-rw-r--r--  fs/fscache/Makefile  1
-rw-r--r--  fs/fscache/cache.c  5
-rw-r--r--  fs/fscache/cookie.c  26
-rw-r--r--  fs/fscache/internal.h  56
-rw-r--r--  fs/fscache/main.c  6
-rw-r--r--  fs/fscache/object-list.c  432
-rw-r--r--  fs/fscache/object.c  104
-rw-r--r--  fs/fscache/operation.c  120
-rw-r--r--  fs/fscache/page.c  273
-rw-r--r--  fs/fscache/proc.c  13
-rw-r--r--  fs/fscache/stats.c  94
-rw-r--r--  fs/fuse/dir.c  7
-rw-r--r--  fs/fuse/file.c  5
-rw-r--r--  fs/gfs2/main.c  4
-rw-r--r--  fs/gfs2/recovery.c  2
-rw-r--r--  fs/ioctl.c  2
-rw-r--r--  fs/jbd/journal.c  3
-rw-r--r--  fs/jbd2/journal.c  2
-rw-r--r--  fs/jffs2/read.c  9
-rw-r--r--  fs/nfs/fscache.c  10
-rw-r--r--  fs/nfs/nfs4proc.c  2
-rw-r--r--  fs/nfsd/nfs3xdr.c  2
-rw-r--r--  fs/nilfs2/btnode.c  4
-rw-r--r--  fs/nilfs2/cpfile.c  2
-rw-r--r--  fs/nilfs2/inode.c  1
-rw-r--r--  fs/nilfs2/ioctl.c  39
-rw-r--r--  fs/nilfs2/segment.c  17
-rw-r--r--  fs/ocfs2/file.c  3
-rw-r--r--  fs/ocfs2/ocfs2.h  7
-rw-r--r--  fs/ocfs2/refcounttree.c  69
-rw-r--r--  fs/ocfs2/super.c  20
-rw-r--r--  fs/ocfs2/uptodate.c  5
-rw-r--r--  fs/proc/array.c  2
-rw-r--r--  fs/proc/base.c  3
-rw-r--r--  fs/sysfs/dir.c  4
-rw-r--r--  fs/xfs/xfs_log_recover.c  4
-rw-r--r--  fs/xfs/xfs_trans_ail.c  23
72 files changed, 1896 insertions, 449 deletions
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 51c94e26a346..e777961939f3 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
343 343
344 BUG_ON(!vcookie->fscache); 344 BUG_ON(!vcookie->fscache);
345 345
346 if (PageFsCache(page)) { 346 return fscache_maybe_release_page(vcookie->fscache, page, gfp);
347 if (fscache_check_page_write(vcookie->fscache, page)) {
348 if (!(gfp & __GFP_WAIT))
349 return 0;
350 fscache_wait_on_page_write(vcookie->fscache, page);
351 }
352
353 fscache_uncache_page(vcookie->fscache, page);
354 ClearPageFsCache(page);
355 }
356
357 return 1;
358} 347}
359 348
360void __v9fs_fscache_invalidate_page(struct page *page) 349void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
368 fscache_wait_on_page_write(vcookie->fscache, page); 357 fscache_wait_on_page_write(vcookie->fscache, page);
369 BUG_ON(!PageLocked(page)); 358 BUG_ON(!PageLocked(page));
370 fscache_uncache_page(vcookie->fscache, page); 359 fscache_uncache_page(vcookie->fscache, page);
371 ClearPageFsCache(page);
372 } 360 }
373} 361}
374 362
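
The two hunks above (and the matching afs hunks further down) drop the open-coded PG_fscache release dance in favour of a single fscache_maybe_release_page() call. As a rough sketch of what that helper has to do — inferred from the code being deleted here, not copied from the FS-Cache core this merge brings in — it looks something like the following; which is presumably also why the invalidate-page hunks can drop their explicit ClearPageFsCache() calls.

#include <linux/fscache.h>
#include <linux/pagemap.h>

/*
 * Sketch only: approximates the logic removed from the 9p/afs callers above.
 * Treat the body as illustrative, not the real fscache_maybe_release_page().
 */
static inline int sketch_maybe_release_page(struct fscache_cookie *cookie,
					    struct page *page, gfp_t gfp)
{
	if (!PageFsCache(page))
		return 1;			/* nothing cached, page may go */

	if (fscache_check_page_write(cookie, page)) {
		if (!(gfp & __GFP_WAIT))
			return 0;		/* caller refuses to block */
		fscache_wait_on_page_write(cookie, page);
	}

	fscache_uncache_page(cookie, page);	/* callers no longer clear
						 * PG_fscache themselves */
	return 1;
}
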
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 873cd31baa47..15cce53bf61e 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -40,6 +40,24 @@
40#include "fid.h" 40#include "fid.h"
41 41
42/** 42/**
43 * struct p9_rdir - readdir accounting
44 * @mutex: mutex protecting readdir
45 * @head: start offset of current dirread buffer
46 * @tail: end offset of current dirread buffer
47 * @buf: dirread buffer
48 *
49 * private structure for keeping track of readdir
50 * allocated on demand
51 */
52
53struct p9_rdir {
54 struct mutex mutex;
55 int head;
56 int tail;
57 uint8_t *buf;
58};
59
60/**
43 * dt_type - return file type 61 * dt_type - return file type
44 * @mistat: mistat structure 62 * @mistat: mistat structure
45 * 63 *
@@ -70,56 +88,79 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
70{ 88{
71 int over; 89 int over;
72 struct p9_wstat st; 90 struct p9_wstat st;
73 int err; 91 int err = 0;
74 struct p9_fid *fid; 92 struct p9_fid *fid;
75 int buflen; 93 int buflen;
76 char *statbuf; 94 int reclen = 0;
77 int n, i = 0; 95 struct p9_rdir *rdir;
78 96
79 P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); 97 P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
80 fid = filp->private_data; 98 fid = filp->private_data;
81 99
82 buflen = fid->clnt->msize - P9_IOHDRSZ; 100 buflen = fid->clnt->msize - P9_IOHDRSZ;
83 statbuf = kmalloc(buflen, GFP_KERNEL); 101
84 if (!statbuf) 102 /* allocate rdir on demand */
85 return -ENOMEM; 103 if (!fid->rdir) {
86 104 rdir = kmalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL);
87 while (1) { 105
88 err = v9fs_file_readn(filp, statbuf, NULL, buflen, 106 if (rdir == NULL) {
89 fid->rdir_fpos); 107 err = -ENOMEM;
90 if (err <= 0) 108 goto exit;
91 break; 109 }
92 110 spin_lock(&filp->f_dentry->d_lock);
93 n = err; 111 if (!fid->rdir) {
94 while (i < n) { 112 rdir->buf = (uint8_t *)rdir + sizeof(struct p9_rdir);
95 err = p9stat_read(statbuf + i, buflen-i, &st, 113 mutex_init(&rdir->mutex);
96 fid->clnt->dotu); 114 rdir->head = rdir->tail = 0;
115 fid->rdir = (void *) rdir;
116 rdir = NULL;
117 }
118 spin_unlock(&filp->f_dentry->d_lock);
119 kfree(rdir);
120 }
121 rdir = (struct p9_rdir *) fid->rdir;
122
123 err = mutex_lock_interruptible(&rdir->mutex);
124 while (err == 0) {
125 if (rdir->tail == rdir->head) {
126 err = v9fs_file_readn(filp, rdir->buf, NULL,
127 buflen, filp->f_pos);
128 if (err <= 0)
129 goto unlock_and_exit;
130
131 rdir->head = 0;
132 rdir->tail = err;
133 }
134
135 while (rdir->head < rdir->tail) {
136 err = p9stat_read(rdir->buf + rdir->head,
137 buflen - rdir->head, &st,
138 fid->clnt->dotu);
97 if (err) { 139 if (err) {
98 P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err); 140 P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
99 err = -EIO; 141 err = -EIO;
100 p9stat_free(&st); 142 p9stat_free(&st);
101 goto free_and_exit; 143 goto unlock_and_exit;
102 } 144 }
103 145 reclen = st.size+2;
104 i += st.size+2;
105 fid->rdir_fpos += st.size+2;
106 146
107 over = filldir(dirent, st.name, strlen(st.name), 147 over = filldir(dirent, st.name, strlen(st.name),
108 filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st)); 148 filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st));
109 149
110 filp->f_pos += st.size+2;
111
112 p9stat_free(&st); 150 p9stat_free(&st);
113 151
114 if (over) { 152 if (over) {
115 err = 0; 153 err = 0;
116 goto free_and_exit; 154 goto unlock_and_exit;
117 } 155 }
156 rdir->head += reclen;
157 filp->f_pos += reclen;
118 } 158 }
119 } 159 }
120 160
121free_and_exit: 161unlock_and_exit:
122 kfree(statbuf); 162 mutex_unlock(&rdir->mutex);
163exit:
123 return err; 164 return err;
124} 165}
125 166
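
The readdir rework above stops allocating a scratch buffer on every call and instead hangs a struct p9_rdir off the open fid, using an allocate-outside-the-lock, publish-under-d_lock step so two racing readers do not both attach one. A stripped-down sketch of that pattern, with generic names rather than the 9p ones:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_rdir {
	struct mutex mutex;
	int head, tail;
	uint8_t buf[];			/* trailing read buffer, like p9_rdir + buflen */
};

/* Illustrative only: mirrors how fid->rdir is attached in the hunk above. */
static int demo_attach_rdir(spinlock_t *lock, void **slot, size_t buflen)
{
	struct demo_rdir *rdir;

	if (*slot)
		return 0;			/* already attached: fast path */

	rdir = kmalloc(sizeof(*rdir) + buflen, GFP_KERNEL);
	if (!rdir)
		return -ENOMEM;

	spin_lock(lock);
	if (!*slot) {				/* recheck now that we hold the lock */
		mutex_init(&rdir->mutex);
		rdir->head = rdir->tail = 0;
		*slot = rdir;
		rdir = NULL;			/* ownership handed over */
	}
	spin_unlock(lock);

	kfree(rdir);				/* NULL if we won the race */
	return 0;
}

The per-fid mutex then serialises the actual dirread/filldir loop, which is why rdir->head and filp->f_pos can be advanced together without further locking.
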
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 5947628aefef..18f74ec4dce9 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -994,8 +994,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
994 P9_DPRINTK(P9_DEBUG_VFS, 994 P9_DPRINTK(P9_DEBUG_VFS,
995 "%s -> %s (%s)\n", dentry->d_name.name, st->extension, buffer); 995 "%s -> %s (%s)\n", dentry->d_name.name, st->extension, buffer);
996 996
997 retval = buflen; 997 retval = strnlen(buffer, buflen);
998
999done: 998done:
1000 kfree(st); 999 kfree(st);
1001 return retval; 1000 return retval;
@@ -1062,7 +1061,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
1062 __putname(link); 1061 __putname(link);
1063 link = ERR_PTR(len); 1062 link = ERR_PTR(len);
1064 } else 1063 } else
1065 link[len] = 0; 1064 link[min(len, PATH_MAX-1)] = 0;
1066 } 1065 }
1067 nd_set_link(nd, link); 1066 nd_set_link(nd, link);
1068 1067
diff --git a/fs/Kconfig b/fs/Kconfig
index 2126078a38ed..64d44efad7a5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -135,7 +135,7 @@ config TMPFS_POSIX_ACL
135 135
136config HUGETLBFS 136config HUGETLBFS
137 bool "HugeTLB file system support" 137 bool "HugeTLB file system support"
138 depends on X86 || IA64 || PPC_BOOK3S_64 || SPARC64 || (S390 && 64BIT) || \ 138 depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
139 SYS_SUPPORTS_HUGETLBFS || BROKEN 139 SYS_SUPPORTS_HUGETLBFS || BROKEN
140 help 140 help
141 hugetlbfs is a filesystem backing for HugeTLB pages, based on 141 hugetlbfs is a filesystem backing for HugeTLB pages, based on
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 681c2a7b013f..39b301662f22 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
315 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 315 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
316 fscache_wait_on_page_write(vnode->cache, page); 316 fscache_wait_on_page_write(vnode->cache, page);
317 fscache_uncache_page(vnode->cache, page); 317 fscache_uncache_page(vnode->cache, page);
318 ClearPageFsCache(page);
319 } 318 }
320#endif 319#endif
321 320
@@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
349 /* deny if page is being written to the cache and the caller hasn't 348 /* deny if page is being written to the cache and the caller hasn't
350 * elected to wait */ 349 * elected to wait */
351#ifdef CONFIG_AFS_FSCACHE 350#ifdef CONFIG_AFS_FSCACHE
352 if (PageFsCache(page)) { 351 if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
353 if (fscache_check_page_write(vnode->cache, page)) { 352 _leave(" = F [cache busy]");
354 if (!(gfp_flags & __GFP_WAIT)) { 353 return 0;
355 _leave(" = F [cache busy]");
356 return 0;
357 }
358 fscache_wait_on_page_write(vnode->cache, page);
359 }
360
361 fscache_uncache_page(vnode->cache, page);
362 ClearPageFsCache(page);
363 } 354 }
364#endif 355#endif
365 356
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e238a0cdac67..94627c4cc193 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2977,10 +2977,10 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
2977 2977
2978 free_space = btrfs_super_total_bytes(disk_super); 2978 free_space = btrfs_super_total_bytes(disk_super);
2979 /* 2979 /*
2980 * we allow the metadata to grow to a max of either 5gb or 5% of the 2980 * we allow the metadata to grow to a max of either 10gb or 5% of the
2981 * space in the volume. 2981 * space in the volume.
2982 */ 2982 */
2983 min_metadata = min((u64)5 * 1024 * 1024 * 1024, 2983 min_metadata = min((u64)10 * 1024 * 1024 * 1024,
2984 div64_u64(free_space * 5, 100)); 2984 div64_u64(free_space * 5, 100));
2985 if (info->total_bytes >= min_metadata) { 2985 if (info->total_bytes >= min_metadata) {
2986 spin_unlock(&info->lock); 2986 spin_unlock(&info->lock);
@@ -4102,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4102} 4102}
4103 4103
4104enum btrfs_loop_type { 4104enum btrfs_loop_type {
4105 LOOP_CACHED_ONLY = 0, 4105 LOOP_FIND_IDEAL = 0,
4106 LOOP_CACHING_NOWAIT = 1, 4106 LOOP_CACHING_NOWAIT = 1,
4107 LOOP_CACHING_WAIT = 2, 4107 LOOP_CACHING_WAIT = 2,
4108 LOOP_ALLOC_CHUNK = 3, 4108 LOOP_ALLOC_CHUNK = 3,
@@ -4131,12 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4131 struct btrfs_block_group_cache *block_group = NULL; 4131 struct btrfs_block_group_cache *block_group = NULL;
4132 int empty_cluster = 2 * 1024 * 1024; 4132 int empty_cluster = 2 * 1024 * 1024;
4133 int allowed_chunk_alloc = 0; 4133 int allowed_chunk_alloc = 0;
4134 int done_chunk_alloc = 0;
4134 struct btrfs_space_info *space_info; 4135 struct btrfs_space_info *space_info;
4135 int last_ptr_loop = 0; 4136 int last_ptr_loop = 0;
4136 int loop = 0; 4137 int loop = 0;
4137 bool found_uncached_bg = false; 4138 bool found_uncached_bg = false;
4138 bool failed_cluster_refill = false; 4139 bool failed_cluster_refill = false;
4139 bool failed_alloc = false; 4140 bool failed_alloc = false;
4141 u64 ideal_cache_percent = 0;
4142 u64 ideal_cache_offset = 0;
4140 4143
4141 WARN_ON(num_bytes < root->sectorsize); 4144 WARN_ON(num_bytes < root->sectorsize);
4142 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); 4145 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4172,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4172 empty_cluster = 0; 4175 empty_cluster = 0;
4173 4176
4174 if (search_start == hint_byte) { 4177 if (search_start == hint_byte) {
4178ideal_cache:
4175 block_group = btrfs_lookup_block_group(root->fs_info, 4179 block_group = btrfs_lookup_block_group(root->fs_info,
4176 search_start); 4180 search_start);
4177 /* 4181 /*
4178 * we don't want to use the block group if it doesn't match our 4182 * we don't want to use the block group if it doesn't match our
4179 * allocation bits, or if its not cached. 4183 * allocation bits, or if its not cached.
4184 *
4185 * However if we are re-searching with an ideal block group
4186 * picked out then we don't care that the block group is cached.
4180 */ 4187 */
4181 if (block_group && block_group_bits(block_group, data) && 4188 if (block_group && block_group_bits(block_group, data) &&
4182 block_group_cache_done(block_group)) { 4189 (block_group->cached != BTRFS_CACHE_NO ||
4190 search_start == ideal_cache_offset)) {
4183 down_read(&space_info->groups_sem); 4191 down_read(&space_info->groups_sem);
4184 if (list_empty(&block_group->list) || 4192 if (list_empty(&block_group->list) ||
4185 block_group->ro) { 4193 block_group->ro) {
@@ -4191,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4191 */ 4199 */
4192 btrfs_put_block_group(block_group); 4200 btrfs_put_block_group(block_group);
4193 up_read(&space_info->groups_sem); 4201 up_read(&space_info->groups_sem);
4194 } else 4202 } else {
4195 goto have_block_group; 4203 goto have_block_group;
4204 }
4196 } else if (block_group) { 4205 } else if (block_group) {
4197 btrfs_put_block_group(block_group); 4206 btrfs_put_block_group(block_group);
4198 } 4207 }
4199 } 4208 }
4200
4201search: 4209search:
4202 down_read(&space_info->groups_sem); 4210 down_read(&space_info->groups_sem);
4203 list_for_each_entry(block_group, &space_info->block_groups, list) { 4211 list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -4209,28 +4217,45 @@ search:
4209 4217
4210have_block_group: 4218have_block_group:
4211 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { 4219 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4220 u64 free_percent;
4221
4222 free_percent = btrfs_block_group_used(&block_group->item);
4223 free_percent *= 100;
4224 free_percent = div64_u64(free_percent,
4225 block_group->key.offset);
4226 free_percent = 100 - free_percent;
4227 if (free_percent > ideal_cache_percent &&
4228 likely(!block_group->ro)) {
4229 ideal_cache_offset = block_group->key.objectid;
4230 ideal_cache_percent = free_percent;
4231 }
4232
4212 /* 4233 /*
4213 * we want to start caching kthreads, but not too many 4234 * We only want to start kthread caching if we are at
4214 * right off the bat so we don't overwhelm the system, 4235 * the point where we will wait for caching to make
4215 * so only start them if there are less than 2 and we're 4236 * progress, or if our ideal search is over and we've
4216 * in the initial allocation phase. 4237 * found somebody to start caching.
4217 */ 4238 */
4218 if (loop > LOOP_CACHING_NOWAIT || 4239 if (loop > LOOP_CACHING_NOWAIT ||
4219 atomic_read(&space_info->caching_threads) < 2) { 4240 (loop > LOOP_FIND_IDEAL &&
4241 atomic_read(&space_info->caching_threads) < 2)) {
4220 ret = cache_block_group(block_group); 4242 ret = cache_block_group(block_group);
4221 BUG_ON(ret); 4243 BUG_ON(ret);
4222 } 4244 }
4223 }
4224
4225 cached = block_group_cache_done(block_group);
4226 if (unlikely(!cached)) {
4227 found_uncached_bg = true; 4245 found_uncached_bg = true;
4228 4246
4229 /* if we only want cached bgs, loop */ 4247 /*
4230 if (loop == LOOP_CACHED_ONLY) 4248 * If loop is set for cached only, try the next block
4249 * group.
4250 */
4251 if (loop == LOOP_FIND_IDEAL)
4231 goto loop; 4252 goto loop;
4232 } 4253 }
4233 4254
4255 cached = block_group_cache_done(block_group);
4256 if (unlikely(!cached))
4257 found_uncached_bg = true;
4258
4234 if (unlikely(block_group->ro)) 4259 if (unlikely(block_group->ro))
4235 goto loop; 4260 goto loop;
4236 4261
@@ -4410,9 +4435,11 @@ loop:
4410 } 4435 }
4411 up_read(&space_info->groups_sem); 4436 up_read(&space_info->groups_sem);
4412 4437
4413 /* LOOP_CACHED_ONLY, only search fully cached block groups 4438 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
4414 * LOOP_CACHING_NOWAIT, search partially cached block groups, but 4439 * for them to make caching progress. Also
4415 * dont wait foR them to finish caching 4440 * determine the best possible bg to cache
4441 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
4442 * caching kthreads as we move along
4416 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 4443 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4417 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again 4444 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4418 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 4445 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
@@ -4421,12 +4448,47 @@ loop:
4421 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && 4448 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4422 (found_uncached_bg || empty_size || empty_cluster || 4449 (found_uncached_bg || empty_size || empty_cluster ||
4423 allowed_chunk_alloc)) { 4450 allowed_chunk_alloc)) {
4424 if (found_uncached_bg) { 4451 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
4425 found_uncached_bg = false; 4452 found_uncached_bg = false;
4426 if (loop < LOOP_CACHING_WAIT) { 4453 loop++;
4427 loop++; 4454 if (!ideal_cache_percent &&
4455 atomic_read(&space_info->caching_threads))
4428 goto search; 4456 goto search;
4429 } 4457
4458 /*
4459 * 1 of the following 2 things have happened so far
4460 *
4461 * 1) We found an ideal block group for caching that
4462 * is mostly full and will cache quickly, so we might
4463 * as well wait for it.
4464 *
4465 * 2) We searched for cached only and we didn't find
4466 * anything, and we didn't start any caching kthreads
4467 * either, so chances are we will loop through and
4468 * start a couple caching kthreads, and then come back
4469 * around and just wait for them. This will be slower
4470 * because we will have 2 caching kthreads reading at
4471 * the same time when we could have just started one
4472 * and waited for it to get far enough to give us an
4473 * allocation, so go ahead and go to the wait caching
4474 * loop.
4475 */
4476 loop = LOOP_CACHING_WAIT;
4477 search_start = ideal_cache_offset;
4478 ideal_cache_percent = 0;
4479 goto ideal_cache;
4480 } else if (loop == LOOP_FIND_IDEAL) {
4481 /*
4482 * Didn't find a uncached bg, wait on anything we find
4483 * next.
4484 */
4485 loop = LOOP_CACHING_WAIT;
4486 goto search;
4487 }
4488
4489 if (loop < LOOP_CACHING_WAIT) {
4490 loop++;
4491 goto search;
4430 } 4492 }
4431 4493
4432 if (loop == LOOP_ALLOC_CHUNK) { 4494 if (loop == LOOP_ALLOC_CHUNK) {
@@ -4438,7 +4500,8 @@ loop:
4438 ret = do_chunk_alloc(trans, root, num_bytes + 4500 ret = do_chunk_alloc(trans, root, num_bytes +
4439 2 * 1024 * 1024, data, 1); 4501 2 * 1024 * 1024, data, 1);
4440 allowed_chunk_alloc = 0; 4502 allowed_chunk_alloc = 0;
4441 } else { 4503 done_chunk_alloc = 1;
4504 } else if (!done_chunk_alloc) {
4442 space_info->force_alloc = 1; 4505 space_info->force_alloc = 1;
4443 } 4506 }
4444 4507
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2c726b7b9faa..ccbdcb54ec5d 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -208,7 +208,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
208 write_lock(&tree->lock); 208 write_lock(&tree->lock);
209 em = lookup_extent_mapping(tree, start, len); 209 em = lookup_extent_mapping(tree, start, len);
210 210
211 WARN_ON(em->start != start || !em); 211 WARN_ON(!em || em->start != start);
212 212
213 if (!em) 213 if (!em)
214 goto out; 214 goto out;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5c2caad76212..cb2849f03251 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1296,7 +1296,7 @@ again:
1296 window_start = entry->offset; 1296 window_start = entry->offset;
1297 window_free = entry->bytes; 1297 window_free = entry->bytes;
1298 last = entry; 1298 last = entry;
1299 max_extent = 0; 1299 max_extent = entry->bytes;
1300 } else { 1300 } else {
1301 last = next; 1301 last = next;
1302 window_free += next->bytes; 1302 window_free += next->bytes;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dae12dc7e159..b3ad168a0bfc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -538,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
538 struct btrfs_root *root = BTRFS_I(inode)->root; 538 struct btrfs_root *root = BTRFS_I(inode)->root;
539 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 539 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
540 struct extent_io_tree *io_tree; 540 struct extent_io_tree *io_tree;
541 int ret; 541 int ret = 0;
542 542
543 if (list_empty(&async_cow->extents)) 543 if (list_empty(&async_cow->extents))
544 return 0; 544 return 0;
@@ -552,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
552 552
553 io_tree = &BTRFS_I(inode)->io_tree; 553 io_tree = &BTRFS_I(inode)->io_tree;
554 554
555retry:
555 /* did the compression code fall back to uncompressed IO? */ 556 /* did the compression code fall back to uncompressed IO? */
556 if (!async_extent->pages) { 557 if (!async_extent->pages) {
557 int page_started = 0; 558 int page_started = 0;
@@ -562,11 +563,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
562 async_extent->ram_size - 1, GFP_NOFS); 563 async_extent->ram_size - 1, GFP_NOFS);
563 564
564 /* allocate blocks */ 565 /* allocate blocks */
565 cow_file_range(inode, async_cow->locked_page, 566 ret = cow_file_range(inode, async_cow->locked_page,
566 async_extent->start, 567 async_extent->start,
567 async_extent->start + 568 async_extent->start +
568 async_extent->ram_size - 1, 569 async_extent->ram_size - 1,
569 &page_started, &nr_written, 0); 570 &page_started, &nr_written, 0);
570 571
571 /* 572 /*
572 * if page_started, cow_file_range inserted an 573 * if page_started, cow_file_range inserted an
@@ -574,7 +575,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
574 * and IO for us. Otherwise, we need to submit 575 * and IO for us. Otherwise, we need to submit
575 * all those pages down to the drive. 576 * all those pages down to the drive.
576 */ 577 */
577 if (!page_started) 578 if (!page_started && !ret)
578 extent_write_locked_range(io_tree, 579 extent_write_locked_range(io_tree,
579 inode, async_extent->start, 580 inode, async_extent->start,
580 async_extent->start + 581 async_extent->start +
@@ -602,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
602 async_extent->compressed_size, 603 async_extent->compressed_size,
603 0, alloc_hint, 604 0, alloc_hint,
604 (u64)-1, &ins, 1); 605 (u64)-1, &ins, 1);
605 BUG_ON(ret); 606 if (ret) {
607 int i;
608 for (i = 0; i < async_extent->nr_pages; i++) {
609 WARN_ON(async_extent->pages[i]->mapping);
610 page_cache_release(async_extent->pages[i]);
611 }
612 kfree(async_extent->pages);
613 async_extent->nr_pages = 0;
614 async_extent->pages = NULL;
615 unlock_extent(io_tree, async_extent->start,
616 async_extent->start +
617 async_extent->ram_size - 1, GFP_NOFS);
618 goto retry;
619 }
620
606 em = alloc_extent_map(GFP_NOFS); 621 em = alloc_extent_map(GFP_NOFS);
607 em->start = async_extent->start; 622 em->start = async_extent->start;
608 em->len = async_extent->ram_size; 623 em->len = async_extent->ram_size;
@@ -743,8 +758,22 @@ static noinline int cow_file_range(struct inode *inode,
743 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree, 758 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
744 start, num_bytes); 759 start, num_bytes);
745 if (em) { 760 if (em) {
746 alloc_hint = em->block_start; 761 /*
747 free_extent_map(em); 762 * if block start isn't an actual block number then find the
763 * first block in this inode and use that as a hint. If that
764 * block is also bogus then just don't worry about it.
765 */
766 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
767 free_extent_map(em);
768 em = search_extent_mapping(em_tree, 0, 0);
769 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
770 alloc_hint = em->block_start;
771 if (em)
772 free_extent_map(em);
773 } else {
774 alloc_hint = em->block_start;
775 free_extent_map(em);
776 }
748 } 777 }
749 read_unlock(&BTRFS_I(inode)->extent_tree.lock); 778 read_unlock(&BTRFS_I(inode)->extent_tree.lock);
750 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 779 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@ -2474,7 +2503,19 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2474 2503
2475 root = BTRFS_I(dir)->root; 2504 root = BTRFS_I(dir)->root;
2476 2505
2506 /*
2507 * 5 items for unlink inode
2508 * 1 for orphan
2509 */
2510 ret = btrfs_reserve_metadata_space(root, 6);
2511 if (ret)
2512 return ret;
2513
2477 trans = btrfs_start_transaction(root, 1); 2514 trans = btrfs_start_transaction(root, 1);
2515 if (IS_ERR(trans)) {
2516 btrfs_unreserve_metadata_space(root, 6);
2517 return PTR_ERR(trans);
2518 }
2478 2519
2479 btrfs_set_trans_block_group(trans, dir); 2520 btrfs_set_trans_block_group(trans, dir);
2480 2521
@@ -2489,6 +2530,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2489 nr = trans->blocks_used; 2530 nr = trans->blocks_used;
2490 2531
2491 btrfs_end_transaction_throttle(trans, root); 2532 btrfs_end_transaction_throttle(trans, root);
2533 btrfs_unreserve_metadata_space(root, 6);
2492 btrfs_btree_balance_dirty(root, nr); 2534 btrfs_btree_balance_dirty(root, nr);
2493 return ret; 2535 return ret;
2494} 2536}
@@ -2569,7 +2611,16 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2569 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 2611 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2570 return -ENOTEMPTY; 2612 return -ENOTEMPTY;
2571 2613
2614 ret = btrfs_reserve_metadata_space(root, 5);
2615 if (ret)
2616 return ret;
2617
2572 trans = btrfs_start_transaction(root, 1); 2618 trans = btrfs_start_transaction(root, 1);
2619 if (IS_ERR(trans)) {
2620 btrfs_unreserve_metadata_space(root, 5);
2621 return PTR_ERR(trans);
2622 }
2623
2573 btrfs_set_trans_block_group(trans, dir); 2624 btrfs_set_trans_block_group(trans, dir);
2574 2625
2575 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 2626 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -2592,6 +2643,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2592out: 2643out:
2593 nr = trans->blocks_used; 2644 nr = trans->blocks_used;
2594 ret = btrfs_end_transaction_throttle(trans, root); 2645 ret = btrfs_end_transaction_throttle(trans, root);
2646 btrfs_unreserve_metadata_space(root, 5);
2595 btrfs_btree_balance_dirty(root, nr); 2647 btrfs_btree_balance_dirty(root, nr);
2596 2648
2597 if (ret && !err) 2649 if (ret && !err)
@@ -5128,6 +5180,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
5128 ei->logged_trans = 0; 5180 ei->logged_trans = 0;
5129 ei->outstanding_extents = 0; 5181 ei->outstanding_extents = 0;
5130 ei->reserved_extents = 0; 5182 ei->reserved_extents = 0;
5183 ei->root = NULL;
5131 spin_lock_init(&ei->accounting_lock); 5184 spin_lock_init(&ei->accounting_lock);
5132 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 5185 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5133 INIT_LIST_HEAD(&ei->i_orphan); 5186 INIT_LIST_HEAD(&ei->i_orphan);
@@ -5144,6 +5197,14 @@ void btrfs_destroy_inode(struct inode *inode)
5144 WARN_ON(inode->i_data.nrpages); 5197 WARN_ON(inode->i_data.nrpages);
5145 5198
5146 /* 5199 /*
5200 * This can happen where we create an inode, but somebody else also
5201 * created the same inode and we need to destroy the one we already
5202 * created.
5203 */
5204 if (!root)
5205 goto free;
5206
5207 /*
5147 * Make sure we're properly removed from the ordered operation 5208 * Make sure we're properly removed from the ordered operation
5148 * lists. 5209 * lists.
5149 */ 5210 */
@@ -5178,6 +5239,7 @@ void btrfs_destroy_inode(struct inode *inode)
5178 } 5239 }
5179 inode_tree_del(inode); 5240 inode_tree_del(inode);
5180 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 5241 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5242free:
5181 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 5243 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5182} 5244}
5183 5245
@@ -5283,11 +5345,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5283 return -ENOTEMPTY; 5345 return -ENOTEMPTY;
5284 5346
5285 /* 5347 /*
5286 * 2 items for dir items 5348 * We want to reserve the absolute worst case amount of items. So if
5287 * 1 item for orphan entry 5349 * both inodes are subvols and we need to unlink them then that would
5288 * 1 item for ref 5350 * require 4 item modifications, but if they are both normal inodes it
5351 * would require 5 item modifications, so we'll assume their normal
5352 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5353 * should cover the worst case number of items we'll modify.
5289 */ 5354 */
5290 ret = btrfs_reserve_metadata_space(root, 4); 5355 ret = btrfs_reserve_metadata_space(root, 11);
5291 if (ret) 5356 if (ret)
5292 return ret; 5357 return ret;
5293 5358
@@ -5403,7 +5468,7 @@ out_fail:
5403 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 5468 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5404 up_read(&root->fs_info->subvol_sem); 5469 up_read(&root->fs_info->subvol_sem);
5405 5470
5406 btrfs_unreserve_metadata_space(root, 4); 5471 btrfs_unreserve_metadata_space(root, 11);
5407 return ret; 5472 return ret;
5408} 5473}
5409 5474
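
A pattern worth noting in the unlink/rmdir/rename hunks above: metadata space is reserved before the transaction is started and released on every exit path, including the new IS_ERR(trans) path. Condensed for the unlink case (a sketch assuming the in-tree btrfs headers, not a drop-in btrfs_unlink()):

/* Condensed sketch of the reserve/start/unreserve shape used above. */
static int sketch_unlink_shape(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	ret = btrfs_reserve_metadata_space(root, 6);	/* 5 items + 1 orphan */
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		btrfs_unreserve_metadata_space(root, 6);/* undo on failure */
		return PTR_ERR(trans);
	}

	/* ... the actual unlink work ... */

	btrfs_end_transaction_throttle(trans, root);
	btrfs_unreserve_metadata_space(root, 6);	/* and on success */
	return 0;
}
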
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 9351428f30e2..67fa2d29d663 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -159,7 +159,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
159 write_extent_buffer(l, item, ptr, sizeof(*item)); 159 write_extent_buffer(l, item, ptr, sizeof(*item));
160 btrfs_mark_buffer_dirty(path->nodes[0]); 160 btrfs_mark_buffer_dirty(path->nodes[0]);
161out: 161out:
162 btrfs_release_path(root, path);
163 btrfs_free_path(path); 162 btrfs_free_path(path);
164 return ret; 163 return ret;
165} 164}
@@ -332,7 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
332 BUG_ON(refs != 0); 331 BUG_ON(refs != 0);
333 ret = btrfs_del_item(trans, root, path); 332 ret = btrfs_del_item(trans, root, path);
334out: 333out:
335 btrfs_release_path(root, path);
336 btrfs_free_path(path); 334 btrfs_free_path(path);
337 return ret; 335 return ret;
338} 336}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index bca82a4ca8e6..c207e8c32c9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -163,8 +163,14 @@ static void wait_current_trans(struct btrfs_root *root)
163 } 163 }
164} 164}
165 165
166enum btrfs_trans_type {
167 TRANS_START,
168 TRANS_JOIN,
169 TRANS_USERSPACE,
170};
171
166static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, 172static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
167 int num_blocks, int wait) 173 int num_blocks, int type)
168{ 174{
169 struct btrfs_trans_handle *h = 175 struct btrfs_trans_handle *h =
170 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 176 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
@@ -172,7 +178,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
172 178
173 mutex_lock(&root->fs_info->trans_mutex); 179 mutex_lock(&root->fs_info->trans_mutex);
174 if (!root->fs_info->log_root_recovering && 180 if (!root->fs_info->log_root_recovering &&
175 ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2)) 181 ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
182 type == TRANS_USERSPACE))
176 wait_current_trans(root); 183 wait_current_trans(root);
177 ret = join_transaction(root); 184 ret = join_transaction(root);
178 BUG_ON(ret); 185 BUG_ON(ret);
@@ -186,7 +193,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
186 h->alloc_exclude_start = 0; 193 h->alloc_exclude_start = 0;
187 h->delayed_ref_updates = 0; 194 h->delayed_ref_updates = 0;
188 195
189 if (!current->journal_info) 196 if (!current->journal_info && type != TRANS_USERSPACE)
190 current->journal_info = h; 197 current->journal_info = h;
191 198
192 root->fs_info->running_transaction->use_count++; 199 root->fs_info->running_transaction->use_count++;
@@ -198,18 +205,18 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
198struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 205struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
199 int num_blocks) 206 int num_blocks)
200{ 207{
201 return start_transaction(root, num_blocks, 1); 208 return start_transaction(root, num_blocks, TRANS_START);
202} 209}
203struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 210struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
204 int num_blocks) 211 int num_blocks)
205{ 212{
206 return start_transaction(root, num_blocks, 0); 213 return start_transaction(root, num_blocks, TRANS_JOIN);
207} 214}
208 215
209struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, 216struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
210 int num_blocks) 217 int num_blocks)
211{ 218{
212 return start_transaction(r, num_blocks, 2); 219 return start_transaction(r, num_blocks, TRANS_USERSPACE);
213} 220}
214 221
215/* wait for a transaction commit to be fully complete */ 222/* wait for a transaction commit to be fully complete */
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 431accd475a7..27089311fbea 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -114,8 +114,9 @@ nomem_lookup_data:
114 114
115/* 115/*
116 * attempt to look up the nominated node in this cache 116 * attempt to look up the nominated node in this cache
117 * - return -ETIMEDOUT to be scheduled again
117 */ 118 */
118static void cachefiles_lookup_object(struct fscache_object *_object) 119static int cachefiles_lookup_object(struct fscache_object *_object)
119{ 120{
120 struct cachefiles_lookup_data *lookup_data; 121 struct cachefiles_lookup_data *lookup_data;
121 struct cachefiles_object *parent, *object; 122 struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
145 object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) 146 object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
146 cachefiles_attr_changed(&object->fscache); 147 cachefiles_attr_changed(&object->fscache);
147 148
148 if (ret < 0) { 149 if (ret < 0 && ret != -ETIMEDOUT) {
149 printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n", 150 if (ret != -ENOBUFS)
150 ret); 151 printk(KERN_WARNING
152 "CacheFiles: Lookup failed error %d\n", ret);
151 fscache_object_lookup_error(&object->fscache); 153 fscache_object_lookup_error(&object->fscache);
152 } 154 }
153 155
154 _leave(" [%d]", ret); 156 _leave(" [%d]", ret);
157 return ret;
155} 158}
156 159
157/* 160/*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
331 } 334 }
332 335
333 cache = object->fscache.cache; 336 cache = object->fscache.cache;
337 fscache_object_destroy(&object->fscache);
334 kmem_cache_free(cachefiles_object_jar, object); 338 kmem_cache_free(cachefiles_object_jar, object);
335 fscache_object_destroyed(cache); 339 fscache_object_destroyed(cache);
336 } 340 }
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
403 if (oi_size == ni_size) 407 if (oi_size == ni_size)
404 return 0; 408 return 0;
405 409
406 newattrs.ia_size = ni_size;
407 newattrs.ia_valid = ATTR_SIZE;
408
409 cachefiles_begin_secure(cache, &saved_cred); 410 cachefiles_begin_secure(cache, &saved_cred);
410 mutex_lock(&object->backer->d_inode->i_mutex); 411 mutex_lock(&object->backer->d_inode->i_mutex);
412
413 /* if there's an extension to a partial page at the end of the backing
414 * file, we need to discard the partial page so that we pick up new
415 * data after it */
416 if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
417 _debug("discard tail %llx", oi_size);
418 newattrs.ia_valid = ATTR_SIZE;
419 newattrs.ia_size = oi_size & PAGE_MASK;
420 ret = notify_change(object->backer, &newattrs);
421 if (ret < 0)
422 goto truncate_failed;
423 }
424
425 newattrs.ia_valid = ATTR_SIZE;
426 newattrs.ia_size = ni_size;
411 ret = notify_change(object->backer, &newattrs); 427 ret = notify_change(object->backer, &newattrs);
428
429truncate_failed:
412 mutex_unlock(&object->backer->d_inode->i_mutex); 430 mutex_unlock(&object->backer->d_inode->i_mutex);
413 cachefiles_end_secure(cache, saved_cred); 431 cachefiles_end_secure(cache, saved_cred);
414 432
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ce818ae39ea..14ac4806e291 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -21,17 +21,81 @@
21#include <linux/security.h> 21#include <linux/security.h>
22#include "internal.h" 22#include "internal.h"
23 23
24static int cachefiles_wait_bit(void *flags) 24#define CACHEFILES_KEYBUF_SIZE 512
25
26/*
27 * dump debugging info about an object
28 */
29static noinline
30void __cachefiles_printk_object(struct cachefiles_object *object,
31 const char *prefix,
32 u8 *keybuf)
25{ 33{
26 schedule(); 34 struct fscache_cookie *cookie;
27 return 0; 35 unsigned keylen, loop;
36
37 printk(KERN_ERR "%sobject: OBJ%x\n",
38 prefix, object->fscache.debug_id);
39 printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
40 prefix, fscache_object_states[object->fscache.state],
41 object->fscache.flags, object->fscache.work.flags,
42 object->fscache.events,
43 object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
44 printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
45 prefix, object->fscache.n_ops, object->fscache.n_in_progress,
46 object->fscache.n_exclusive);
47 printk(KERN_ERR "%sparent=%p\n",
48 prefix, object->fscache.parent);
49
50 spin_lock(&object->fscache.lock);
51 cookie = object->fscache.cookie;
52 if (cookie) {
53 printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
54 prefix,
55 object->fscache.cookie,
56 object->fscache.cookie->parent,
57 object->fscache.cookie->netfs_data,
58 object->fscache.cookie->flags);
59 if (keybuf)
60 keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
61 CACHEFILES_KEYBUF_SIZE);
62 else
63 keylen = 0;
64 } else {
65 printk(KERN_ERR "%scookie=NULL\n", prefix);
66 keylen = 0;
67 }
68 spin_unlock(&object->fscache.lock);
69
70 if (keylen) {
71 printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
72 for (loop = 0; loop < keylen; loop++)
73 printk("%02x", keybuf[loop]);
74 printk("'\n");
75 }
76}
77
78/*
79 * dump debugging info about a pair of objects
80 */
81static noinline void cachefiles_printk_object(struct cachefiles_object *object,
82 struct cachefiles_object *xobject)
83{
84 u8 *keybuf;
85
86 keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
87 if (object)
88 __cachefiles_printk_object(object, "", keybuf);
89 if (xobject)
90 __cachefiles_printk_object(xobject, "x", keybuf);
91 kfree(keybuf);
28} 92}
29 93
30/* 94/*
31 * record the fact that an object is now active 95 * record the fact that an object is now active
32 */ 96 */
33static void cachefiles_mark_object_active(struct cachefiles_cache *cache, 97static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
34 struct cachefiles_object *object) 98 struct cachefiles_object *object)
35{ 99{
36 struct cachefiles_object *xobject; 100 struct cachefiles_object *xobject;
37 struct rb_node **_p, *_parent = NULL; 101 struct rb_node **_p, *_parent = NULL;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
42try_again: 106try_again:
43 write_lock(&cache->active_lock); 107 write_lock(&cache->active_lock);
44 108
45 if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) 109 if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
110 printk(KERN_ERR "CacheFiles: Error: Object already active\n");
111 cachefiles_printk_object(object, NULL);
46 BUG(); 112 BUG();
113 }
47 114
48 dentry = object->dentry; 115 dentry = object->dentry;
49 _p = &cache->active_nodes.rb_node; 116 _p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ try_again:
66 rb_insert_color(&object->active_node, &cache->active_nodes); 133 rb_insert_color(&object->active_node, &cache->active_nodes);
67 134
68 write_unlock(&cache->active_lock); 135 write_unlock(&cache->active_lock);
69 _leave(""); 136 _leave(" = 0");
70 return; 137 return 0;
71 138
72 /* an old object from a previous incarnation is hogging the slot - we 139 /* an old object from a previous incarnation is hogging the slot - we
73 * need to wait for it to be destroyed */ 140 * need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ wait_for_old_object:
76 printk(KERN_ERR "\n"); 143 printk(KERN_ERR "\n");
77 printk(KERN_ERR "CacheFiles: Error:" 144 printk(KERN_ERR "CacheFiles: Error:"
78 " Unexpected object collision\n"); 145 " Unexpected object collision\n");
79 printk(KERN_ERR "xobject: OBJ%x\n", 146 cachefiles_printk_object(object, xobject);
80 xobject->fscache.debug_id);
81 printk(KERN_ERR "xobjstate=%s\n",
82 fscache_object_states[xobject->fscache.state]);
83 printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
84 printk(KERN_ERR "xobjevent=%lx [%lx]\n",
85 xobject->fscache.events, xobject->fscache.event_mask);
86 printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
87 xobject->fscache.n_ops, xobject->fscache.n_in_progress,
88 xobject->fscache.n_exclusive);
89 printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
90 xobject->fscache.cookie,
91 xobject->fscache.cookie->parent,
92 xobject->fscache.cookie->netfs_data,
93 xobject->fscache.cookie->flags);
94 printk(KERN_ERR "xparent=%p\n",
95 xobject->fscache.parent);
96 printk(KERN_ERR "object: OBJ%x\n",
97 object->fscache.debug_id);
98 printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
99 object->fscache.cookie,
100 object->fscache.cookie->parent,
101 object->fscache.cookie->netfs_data,
102 object->fscache.cookie->flags);
103 printk(KERN_ERR "parent=%p\n",
104 object->fscache.parent);
105 BUG(); 147 BUG();
106 } 148 }
107 atomic_inc(&xobject->usage); 149 atomic_inc(&xobject->usage);
108 write_unlock(&cache->active_lock); 150 write_unlock(&cache->active_lock);
109 151
110 _debug(">>> wait"); 152 if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
111 wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE, 153 wait_queue_head_t *wq;
112 cachefiles_wait_bit, TASK_UNINTERRUPTIBLE); 154
113 _debug("<<< waited"); 155 signed long timeout = 60 * HZ;
156 wait_queue_t wait;
157 bool requeue;
158
159 /* if the object we're waiting for is queued for processing,
160 * then just put ourselves on the queue behind it */
161 if (slow_work_is_queued(&xobject->fscache.work)) {
162 _debug("queue OBJ%x behind OBJ%x immediately",
163 object->fscache.debug_id,
164 xobject->fscache.debug_id);
165 goto requeue;
166 }
167
168 /* otherwise we sleep until either the object we're waiting for
169 * is done, or the slow-work facility wants the thread back to
170 * do other work */
171 wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
172 init_wait(&wait);
173 requeue = false;
174 do {
175 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
176 if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
177 break;
178 requeue = slow_work_sleep_till_thread_needed(
179 &object->fscache.work, &timeout);
180 } while (timeout > 0 && !requeue);
181 finish_wait(wq, &wait);
182
183 if (requeue &&
184 test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
185 _debug("queue OBJ%x behind OBJ%x after wait",
186 object->fscache.debug_id,
187 xobject->fscache.debug_id);
188 goto requeue;
189 }
190
191 if (timeout <= 0) {
192 printk(KERN_ERR "\n");
193 printk(KERN_ERR "CacheFiles: Error: Overlong"
194 " wait for old active object to go away\n");
195 cachefiles_printk_object(object, xobject);
196 goto requeue;
197 }
198 }
199
200 ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
114 201
115 cache->cache.ops->put_object(&xobject->fscache); 202 cache->cache.ops->put_object(&xobject->fscache);
116 goto try_again; 203 goto try_again;
204
205requeue:
206 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
207 cache->cache.ops->put_object(&xobject->fscache);
208 _leave(" = -ETIMEDOUT");
209 return -ETIMEDOUT;
117} 210}
118 211
119/* 212/*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
254 347
255 dir = dget_parent(object->dentry); 348 dir = dget_parent(object->dentry);
256 349
257 mutex_lock(&dir->d_inode->i_mutex); 350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
258 ret = cachefiles_bury_object(cache, dir, object->dentry); 351 ret = cachefiles_bury_object(cache, dir, object->dentry);
259 352
260 dput(dir); 353 dput(dir);
@@ -307,7 +400,7 @@ lookup_again:
307 /* search the current directory for the element name */ 400 /* search the current directory for the element name */
308 _debug("lookup '%s'", name); 401 _debug("lookup '%s'", name);
309 402
310 mutex_lock(&dir->d_inode->i_mutex); 403 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
311 404
312 start = jiffies; 405 start = jiffies;
313 next = lookup_one_len(name, dir, nlen); 406 next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ lookup_again:
418 } 511 }
419 512
420 /* note that we're now using this object */ 513 /* note that we're now using this object */
421 cachefiles_mark_object_active(cache, object); 514 ret = cachefiles_mark_object_active(cache, object);
422 515
423 mutex_unlock(&dir->d_inode->i_mutex); 516 mutex_unlock(&dir->d_inode->i_mutex);
424 dput(dir); 517 dput(dir);
425 dir = NULL; 518 dir = NULL;
426 519
520 if (ret == -ETIMEDOUT)
521 goto mark_active_timed_out;
522
427 _debug("=== OBTAINED_OBJECT ==="); 523 _debug("=== OBTAINED_OBJECT ===");
428 524
429 if (object->new) { 525 if (object->new) {
@@ -467,6 +563,10 @@ create_error:
467 cachefiles_io_error(cache, "Create/mkdir failed"); 563 cachefiles_io_error(cache, "Create/mkdir failed");
468 goto error; 564 goto error;
469 565
566mark_active_timed_out:
567 _debug("mark active timed out");
568 goto release_dentry;
569
470check_error: 570check_error:
471 _debug("check error %d", ret); 571 _debug("check error %d", ret);
472 write_lock(&cache->active_lock); 572 write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ check_error:
474 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); 574 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
475 wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE); 575 wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
476 write_unlock(&cache->active_lock); 576 write_unlock(&cache->active_lock);
477 577release_dentry:
478 dput(object->dentry); 578 dput(object->dentry);
479 object->dentry = NULL; 579 object->dentry = NULL;
480 goto error_out; 580 goto error_out;
@@ -495,9 +595,6 @@ error:
495error_out2: 595error_out2:
496 dput(dir); 596 dput(dir);
497error_out: 597error_out:
498 if (ret == -ENOSPC)
499 ret = -ENOBUFS;
500
501 _leave(" = error %d", -ret); 598 _leave(" = error %d", -ret);
502 return ret; 599 return ret;
503} 600}
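
The wait_for_old_object rework above swaps an unbounded wait_on_bit() for a bounded wait that can also hand the thread back to the slow-work pool. Stripped of the slow-work plumbing (the real code calls slow_work_sleep_till_thread_needed(); this sketch substitutes a plain schedule_timeout()), the waiting skeleton is roughly:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

/*
 * Sketch of the bounded bit-wait added above.  Generic names; the cachefiles
 * version also asks the slow-work facility whether it should requeue instead
 * of sleeping, which is omitted here.
 */
static bool sketch_wait_for_bit_clear(unsigned long *flags, int bit,
				      signed long timeout)
{
	wait_queue_head_t *wq = bit_waitqueue(flags, bit);
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (!test_bit(bit, flags))
			break;
		timeout = schedule_timeout(timeout);	/* returns time left */
	} while (timeout > 0);
	finish_wait(wq, &wait);

	return !test_bit(bit, flags);	/* false means we timed out */
}
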
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index a69787e7dd96..a6c8c6fe8df9 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/mount.h> 12#include <linux/mount.h>
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/ima.h>
14#include "internal.h" 15#include "internal.h"
15 16
16/* 17/*
@@ -40,8 +41,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
40 41
41 _debug("--- monitor %p %lx ---", page, page->flags); 42 _debug("--- monitor %p %lx ---", page, page->flags);
42 43
43 if (!PageUptodate(page) && !PageError(page)) 44 if (!PageUptodate(page) && !PageError(page)) {
44 dump_stack(); 45 /* unlocked, not uptodate and not erronous? */
46 _debug("page probably truncated");
47 }
45 48
46 /* remove from the waitqueue */ 49 /* remove from the waitqueue */
47 list_del(&wait->task_list); 50 list_del(&wait->task_list);
@@ -61,6 +64,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
61} 64}
62 65
63/* 66/*
67 * handle a probably truncated page
68 * - check to see if the page is still relevant and reissue the read if
69 * possible
70 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
71 * must wait again and 0 if successful
72 */
73static int cachefiles_read_reissue(struct cachefiles_object *object,
74 struct cachefiles_one_read *monitor)
75{
76 struct address_space *bmapping = object->backer->d_inode->i_mapping;
77 struct page *backpage = monitor->back_page, *backpage2;
78 int ret;
79
80 kenter("{ino=%lx},{%lx,%lx}",
81 object->backer->d_inode->i_ino,
82 backpage->index, backpage->flags);
83
84 /* skip if the page was truncated away completely */
85 if (backpage->mapping != bmapping) {
86 kleave(" = -ENODATA [mapping]");
87 return -ENODATA;
88 }
89
90 backpage2 = find_get_page(bmapping, backpage->index);
91 if (!backpage2) {
92 kleave(" = -ENODATA [gone]");
93 return -ENODATA;
94 }
95
96 if (backpage != backpage2) {
97 put_page(backpage2);
98 kleave(" = -ENODATA [different]");
99 return -ENODATA;
100 }
101
102 /* the page is still there and we already have a ref on it, so we don't
103 * need a second */
104 put_page(backpage2);
105
106 INIT_LIST_HEAD(&monitor->op_link);
107 add_page_wait_queue(backpage, &monitor->monitor);
108
109 if (trylock_page(backpage)) {
110 ret = -EIO;
111 if (PageError(backpage))
112 goto unlock_discard;
113 ret = 0;
114 if (PageUptodate(backpage))
115 goto unlock_discard;
116
117 kdebug("reissue read");
118 ret = bmapping->a_ops->readpage(NULL, backpage);
119 if (ret < 0)
120 goto unlock_discard;
121 }
122
123 /* but the page may have been read before the monitor was installed, so
124 * the monitor may miss the event - so we have to ensure that we do get
125 * one in such a case */
126 if (trylock_page(backpage)) {
127 _debug("jumpstart %p {%lx}", backpage, backpage->flags);
128 unlock_page(backpage);
129 }
130
131 /* it'll reappear on the todo list */
132 kleave(" = -EINPROGRESS");
133 return -EINPROGRESS;
134
135unlock_discard:
136 unlock_page(backpage);
137 spin_lock_irq(&object->work_lock);
138 list_del(&monitor->op_link);
139 spin_unlock_irq(&object->work_lock);
140 kleave(" = %d", ret);
141 return ret;
142}
143
144/*
64 * copy data from backing pages to netfs pages to complete a read operation 145 * copy data from backing pages to netfs pages to complete a read operation
65 * - driven by FS-Cache's thread pool 146 * - driven by FS-Cache's thread pool
66 */ 147 */
@@ -92,20 +173,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
92 173
93 _debug("- copy {%lu}", monitor->back_page->index); 174 _debug("- copy {%lu}", monitor->back_page->index);
94 175
95 error = -EIO; 176 recheck:
96 if (PageUptodate(monitor->back_page)) { 177 if (PageUptodate(monitor->back_page)) {
97 copy_highpage(monitor->netfs_page, monitor->back_page); 178 copy_highpage(monitor->netfs_page, monitor->back_page);
98 179
99 pagevec_add(&pagevec, monitor->netfs_page); 180 pagevec_add(&pagevec, monitor->netfs_page);
100 fscache_mark_pages_cached(monitor->op, &pagevec); 181 fscache_mark_pages_cached(monitor->op, &pagevec);
101 error = 0; 182 error = 0;
102 } 183 } else if (!PageError(monitor->back_page)) {
103 184 /* the page has probably been truncated */
104 if (error) 185 error = cachefiles_read_reissue(object, monitor);
186 if (error == -EINPROGRESS)
187 goto next;
188 goto recheck;
189 } else {
105 cachefiles_io_error_obj( 190 cachefiles_io_error_obj(
106 object, 191 object,
107 "Readpage failed on backing file %lx", 192 "Readpage failed on backing file %lx",
108 (unsigned long) monitor->back_page->flags); 193 (unsigned long) monitor->back_page->flags);
194 error = -EIO;
195 }
109 196
110 page_cache_release(monitor->back_page); 197 page_cache_release(monitor->back_page);
111 198
@@ -114,6 +201,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
114 fscache_put_retrieval(op); 201 fscache_put_retrieval(op);
115 kfree(monitor); 202 kfree(monitor);
116 203
204 next:
117 /* let the thread pool have some air occasionally */ 205 /* let the thread pool have some air occasionally */
118 max--; 206 max--;
119 if (max < 0 || need_resched()) { 207 if (max < 0 || need_resched()) {
@@ -333,7 +421,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
333 421
334 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 422 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
335 423
336 op->op.flags = FSCACHE_OP_FAST; 424 op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
425 op->op.flags |= FSCACHE_OP_FAST;
337 op->op.processor = cachefiles_read_copier; 426 op->op.processor = cachefiles_read_copier;
338 427
339 pagevec_init(&pagevec, 0); 428 pagevec_init(&pagevec, 0);
@@ -639,7 +728,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
639 728
640 pagevec_init(&pagevec, 0); 729 pagevec_init(&pagevec, 0);
641 730
642 op->op.flags = FSCACHE_OP_FAST; 731 op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
732 op->op.flags |= FSCACHE_OP_FAST;
643 op->op.processor = cachefiles_read_copier; 733 op->op.processor = cachefiles_read_copier;
644 734
645 INIT_LIST_HEAD(&backpages); 735 INIT_LIST_HEAD(&backpages);
@@ -801,7 +891,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
801 struct cachefiles_cache *cache; 891 struct cachefiles_cache *cache;
802 mm_segment_t old_fs; 892 mm_segment_t old_fs;
803 struct file *file; 893 struct file *file;
804 loff_t pos; 894 loff_t pos, eof;
895 size_t len;
805 void *data; 896 void *data;
806 int ret; 897 int ret;
807 898
@@ -832,18 +923,33 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
832 if (IS_ERR(file)) { 923 if (IS_ERR(file)) {
833 ret = PTR_ERR(file); 924 ret = PTR_ERR(file);
834 } else { 925 } else {
926 ima_counts_get(file);
835 ret = -EIO; 927 ret = -EIO;
836 if (file->f_op->write) { 928 if (file->f_op->write) {
837 pos = (loff_t) page->index << PAGE_SHIFT; 929 pos = (loff_t) page->index << PAGE_SHIFT;
930
931 /* we mustn't write more data than we have, so we have
932 * to beware of a partial page at EOF */
933 eof = object->fscache.store_limit_l;
934 len = PAGE_SIZE;
935 if (eof & ~PAGE_MASK) {
936 ASSERTCMP(pos, <, eof);
937 if (eof - pos < PAGE_SIZE) {
938 _debug("cut short %llx to %llx",
939 pos, eof);
940 len = eof - pos;
941 ASSERTCMP(pos + len, ==, eof);
942 }
943 }
944
838 data = kmap(page); 945 data = kmap(page);
839 old_fs = get_fs(); 946 old_fs = get_fs();
840 set_fs(KERNEL_DS); 947 set_fs(KERNEL_DS);
841 ret = file->f_op->write( 948 ret = file->f_op->write(
842 file, (const void __user *) data, PAGE_SIZE, 949 file, (const void __user *) data, len, &pos);
843 &pos);
844 set_fs(old_fs); 950 set_fs(old_fs);
845 kunmap(page); 951 kunmap(page);
846 if (ret != PAGE_SIZE) 952 if (ret != len)
847 ret = -EIO; 953 ret = -EIO;
848 } 954 }
849 fput(file); 955 fput(file);
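
The cachefiles_write_page() hunk above clamps the final write so a partial page at the object's store limit is not padded out to a full PAGE_SIZE. A standalone illustration of that arithmetic (userspace, made-up numbers, assuming 4096-byte pages):

#include <stdio.h>

int main(void)
{
	unsigned long long eof = 10ULL * 4096 + 1000;	/* store limit ends mid-page */
	unsigned long long pos = 10ULL * 4096;		/* offset of the page written */
	unsigned long len = 4096;

	/* same test as the kernel hunk: partial tail page, and we're in it */
	if ((eof & 4095) && eof - pos < 4096)
		len = eof - pos;

	printf("write length = %lu bytes\n", len);	/* prints 1000, not 4096 */
	return 0;
}
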
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 145540a316ab..094ea65afc85 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,12 @@
1Version 1.61
2------------
3Fix append problem to Samba servers (files opened with O_APPEND could
4have duplicated data). Fix oops in cifs_lookup. Workaround problem
5mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
6Disable use of server inode numbers when server only
7partially supports them (e.g. for one server querying inode numbers on
 8FindFirst fails but QPathInfo queries work).
9
1Version 1.60 10Version 1.60
2------------- 11-------------
3Fix memory leak in reconnect. Fix oops in DFS mount error path. 12Fix memory leak in reconnect. Fix oops in DFS mount error path.
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9a5e4f5f3122..29f1da761bbf 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1037,7 +1037,7 @@ init_cifs(void)
1037 if (rc) 1037 if (rc)
1038 goto out_unregister_key_type; 1038 goto out_unregister_key_type;
1039#endif 1039#endif
1040 rc = slow_work_register_user(); 1040 rc = slow_work_register_user(THIS_MODULE);
1041 if (rc) 1041 if (rc)
1042 goto out_unregister_resolver_key; 1042 goto out_unregister_resolver_key;
1043 1043
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 6928c24d1d42..5646727e33f5 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -388,4 +388,5 @@ extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
388 const struct nls_table *nls_codepage, int remap_special_chars); 388 const struct nls_table *nls_codepage, int remap_special_chars);
389extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, 389extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
390 const int netfid, __u64 *pExtAttrBits, __u64 *pMask); 390 const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
391extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
391#endif /* _CIFSPROTO_H */ 392#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index b09098079916..63ea83ff687f 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2220,16 +2220,8 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon,
2220 struct cifs_sb_info *cifs_sb, const char *full_path) 2220 struct cifs_sb_info *cifs_sb, const char *full_path)
2221{ 2221{
2222 int rc; 2222 int rc;
2223 __u64 inode_num;
2224 FILE_ALL_INFO *pfile_info; 2223 FILE_ALL_INFO *pfile_info;
2225 2224
2226 rc = CIFSGetSrvInodeNumber(xid, tcon, full_path, &inode_num,
2227 cifs_sb->local_nls,
2228 cifs_sb->mnt_cifs_flags &
2229 CIFS_MOUNT_MAP_SPECIAL_CHR);
2230 if (rc != -EOPNOTSUPP)
2231 return rc;
2232
2233 pfile_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 2225 pfile_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
2234 if (pfile_info == NULL) 2226 if (pfile_info == NULL)
2235 return -ENOMEM; 2227 return -ENOMEM;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 627a60a6c1b1..1f42f772865a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -214,8 +214,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
214 posix_flags |= SMB_O_EXCL; 214 posix_flags |= SMB_O_EXCL;
215 if (oflags & O_TRUNC) 215 if (oflags & O_TRUNC)
216 posix_flags |= SMB_O_TRUNC; 216 posix_flags |= SMB_O_TRUNC;
217 if (oflags & O_APPEND)
218 posix_flags |= SMB_O_APPEND;
219 if (oflags & O_SYNC) 217 if (oflags & O_SYNC)
220 posix_flags |= SMB_O_SYNC; 218 posix_flags |= SMB_O_SYNC;
221 if (oflags & O_DIRECTORY) 219 if (oflags & O_DIRECTORY)
@@ -643,9 +641,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
643 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let 641 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
644 * the VFS handle the create. 642 * the VFS handle the create.
645 */ 643 */
646 if (nd->flags & LOOKUP_EXCL) { 644 if (nd && (nd->flags & LOOKUP_EXCL)) {
647 d_instantiate(direntry, NULL); 645 d_instantiate(direntry, NULL);
648 return 0; 646 return NULL;
649 } 647 }
650 648
651 /* can not grab the rename sem here since it would 649 /* can not grab the rename sem here since it would
@@ -675,7 +673,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
675 * reduction in network traffic in the other paths. 673 * reduction in network traffic in the other paths.
676 */ 674 */
677 if (pTcon->unix_ext) { 675 if (pTcon->unix_ext) {
678 if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && 676 if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
679 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && 677 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
680 (nd->intent.open.flags & O_CREAT)) { 678 (nd->intent.open.flags & O_CREAT)) {
681 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, 679 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 5e2492535daa..cababd8a52df 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -512,13 +512,10 @@ int cifs_get_inode_info(struct inode **pinode,
512 cifs_sb->local_nls, 512 cifs_sb->local_nls,
513 cifs_sb->mnt_cifs_flags & 513 cifs_sb->mnt_cifs_flags &
514 CIFS_MOUNT_MAP_SPECIAL_CHR); 514 CIFS_MOUNT_MAP_SPECIAL_CHR);
515 if (rc1) { 515 if (rc1 || !fattr.cf_uniqueid) {
516 cFYI(1, ("GetSrvInodeNum rc %d", rc1)); 516 cFYI(1, ("GetSrvInodeNum rc %d", rc1));
517 fattr.cf_uniqueid = iunique(sb, ROOT_I); 517 fattr.cf_uniqueid = iunique(sb, ROOT_I);
518 /* disable serverino if call not supported */ 518 cifs_autodisable_serverino(cifs_sb);
519 if (rc1 == -EINVAL)
520 cifs_sb->mnt_cifs_flags &=
521 ~CIFS_MOUNT_SERVER_INUM;
522 } 519 }
523 } else { 520 } else {
524 fattr.cf_uniqueid = iunique(sb, ROOT_I); 521 fattr.cf_uniqueid = iunique(sb, ROOT_I);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 0241b25ac33f..d27d4ec6579b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -715,3 +715,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
715ctoUCS_out: 715ctoUCS_out:
716 return i; 716 return i;
717} 717}
718
719void
720cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
721{
722 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
723 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
724 cERROR(1, ("Autodisabling the use of server inode numbers on "
725 "%s. This server doesn't seem to support them "
726 "properly. Hardlinks will not be recognized on this "
727 "mount. Consider mounting with the \"noserverino\" "
728 "option to silence this message.",
729 cifs_sb->tcon->treeName));
730 }
731}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 1f098ca71636..f84062f9a985 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -727,11 +727,12 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
727 cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *) 727 cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)
728 pfindEntry, cifs_sb); 728 pfindEntry, cifs_sb);
729 729
730 /* FIXME: make _to_fattr functions fill this out */ 730 if (inum && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
731 if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO)
732 fattr.cf_uniqueid = inum; 731 fattr.cf_uniqueid = inum;
733 else 732 } else {
734 fattr.cf_uniqueid = iunique(sb, ROOT_I); 733 fattr.cf_uniqueid = iunique(sb, ROOT_I);
734 cifs_autodisable_serverino(cifs_sb);
735 }
735 736
736 ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); 737 ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
737 tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr); 738 tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr);
diff --git a/fs/compat.c b/fs/compat.c
index d576b552e8e2..6c19040ffeef 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1532,6 +1532,8 @@ int compat_do_execve(char * filename,
1532 if (retval < 0) 1532 if (retval < 0)
1533 goto out; 1533 goto out;
1534 1534
1535 current->stack_start = current->mm->start_stack;
1536
1535 /* execve succeeded */ 1537 /* execve succeeded */
1536 current->fs->in_exec = 0; 1538 current->fs->in_exec = 0;
1537 current->in_execve = 0; 1539 current->in_execve = 0;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f91fd51b32e3..d84e7058c298 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1800,7 +1800,7 @@ struct space_resv_32 {
1800/* just account for different alignment */ 1800/* just account for different alignment */
1801static int compat_ioctl_preallocate(struct file *file, unsigned long arg) 1801static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
1802{ 1802{
1803 struct space_resv_32 __user *p32 = (void __user *)arg; 1803 struct space_resv_32 __user *p32 = compat_ptr(arg);
1804 struct space_resv __user *p = compat_alloc_user_space(sizeof(*p)); 1804 struct space_resv __user *p = compat_alloc_user_space(sizeof(*p));
1805 1805
1806 if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) || 1806 if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
@@ -2802,7 +2802,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
2802#else 2802#else
2803 case FS_IOC_RESVSP: 2803 case FS_IOC_RESVSP:
2804 case FS_IOC_RESVSP64: 2804 case FS_IOC_RESVSP64:
2805 error = ioctl_preallocate(filp, (void __user *)arg); 2805 error = ioctl_preallocate(filp, compat_ptr(arg));
2806 goto out_fput; 2806 goto out_fput;
2807#endif 2807#endif
2808 2808
diff --git a/fs/exec.c b/fs/exec.c
index d49be6bc1793..ba112bd4a339 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -624,10 +624,8 @@ int setup_arg_pages(struct linux_binprm *bprm,
624 /* Move stack pages down in memory. */ 624 /* Move stack pages down in memory. */
625 if (stack_shift) { 625 if (stack_shift) {
626 ret = shift_arg_pages(vma, stack_shift); 626 ret = shift_arg_pages(vma, stack_shift);
627 if (ret) { 627 if (ret)
628 up_write(&mm->mmap_sem); 628 goto out_unlock;
629 return ret;
630 }
631 } 629 }
632 630
633#ifdef CONFIG_STACK_GROWSUP 631#ifdef CONFIG_STACK_GROWSUP
@@ -641,7 +639,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
641 639
642out_unlock: 640out_unlock:
643 up_write(&mm->mmap_sem); 641 up_write(&mm->mmap_sem);
644 return 0; 642 return ret;
645} 643}
646EXPORT_SYMBOL(setup_arg_pages); 644EXPORT_SYMBOL(setup_arg_pages);
647 645
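
The setup_arg_pages() change above folds the early unlock-and-return into the common out_unlock exit, so mmap_sem is released in exactly one place and the final return propagates ret. A generic sketch of that single-exit idiom (the lock and work function here are illustrative, not kernel APIs):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_step(int n) { return n < 0 ? -1 : 0; }	/* placeholder work */

/* Every failure path jumps to the single unlock site instead of
 * unlocking and returning inline. */
static int guarded_work(int a, int b)
{
	int ret;

	pthread_mutex_lock(&lock);

	ret = do_step(a);
	if (ret)
		goto out_unlock;

	ret = do_step(b);

out_unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}
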
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 451d166bbe93..8209f266e9ad 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -46,19 +46,21 @@
46int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync) 46int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
47{ 47{
48 struct inode *inode = dentry->d_inode; 48 struct inode *inode = dentry->d_inode;
49 struct ext3_inode_info *ei = EXT3_I(inode);
50 journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
49 int ret = 0; 51 int ret = 0;
52 tid_t commit_tid;
53
54 if (inode->i_sb->s_flags & MS_RDONLY)
55 return 0;
50 56
51 J_ASSERT(ext3_journal_current_handle() == NULL); 57 J_ASSERT(ext3_journal_current_handle() == NULL);
52 58
53 /* 59 /*
54 * data=writeback: 60 * data=writeback,ordered:
55 * The caller's filemap_fdatawrite()/wait will sync the data. 61 * The caller's filemap_fdatawrite()/wait will sync the data.
 56 * sync_inode() will sync the metadata 62 * Metadata is in the journal; we wait for a proper transaction
57 * 63 * to commit here.
58 * data=ordered:
59 * The caller's filemap_fdatawrite() will write the data and
60 * sync_inode() will write the inode if it is dirty. Then the caller's
61 * filemap_fdatawait() will wait on the pages.
62 * 64 *
63 * data=journal: 65 * data=journal:
64 * filemap_fdatawrite won't do anything (the buffers are clean). 66 * filemap_fdatawrite won't do anything (the buffers are clean).
@@ -73,22 +75,16 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
73 goto out; 75 goto out;
74 } 76 }
75 77
76 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 78 if (datasync)
77 goto flush; 79 commit_tid = atomic_read(&ei->i_datasync_tid);
80 else
81 commit_tid = atomic_read(&ei->i_sync_tid);
78 82
79 /* 83 if (log_start_commit(journal, commit_tid)) {
80 * The VFS has written the file data. If the inode is unaltered 84 log_wait_commit(journal, commit_tid);
81 * then we need not start a commit.
82 */
83 if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) {
84 struct writeback_control wbc = {
85 .sync_mode = WB_SYNC_ALL,
86 .nr_to_write = 0, /* sys_fsync did this */
87 };
88 ret = sync_inode(inode, &wbc);
89 goto out; 85 goto out;
90 } 86 }
91flush: 87
92 /* 88 /*
93 * In case we didn't commit a transaction, we have to flush 89 * In case we didn't commit a transaction, we have to flush
94 * disk caches manually so that data really is on persistent 90 * disk caches manually so that data really is on persistent
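
The ext3_sync_file() rewrite above stops writing the inode with sync_inode() and instead waits on a per-inode transaction ID: i_sync_tid covers the last metadata change and i_datasync_tid the last change that matters for fdatasync, so fsync only has to start and wait for that one journal commit. A simplified user-space model of the fast path (the journal structure and committed_tid field are invented stand-ins for JBD's log_start_commit()/log_wait_commit() machinery):

#include <stdatomic.h>
#include <stdbool.h>

typedef unsigned int tid_t;

struct journal {			/* toy stand-in for journal_t */
	atomic_uint committed_tid;	/* last transaction known committed */
};

struct inode_tids {
	atomic_uint i_sync_tid;		/* covers any metadata change */
	atomic_uint i_datasync_tid;	/* covers changes relevant to fdatasync */
};

/* Pick the tid that fsync/fdatasync must wait for and report whether
 * a commit is still outstanding for it (tid wraparound ignored here). */
static bool fsync_needs_commit(struct journal *j, struct inode_tids *ei,
			       bool datasync, tid_t *wait_for)
{
	*wait_for = datasync ? atomic_load(&ei->i_datasync_tid)
			     : atomic_load(&ei->i_sync_tid);
	return atomic_load(&j->committed_tid) < *wait_for;
}
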
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index acf1b1423327..354ed3b47b30 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -699,8 +699,9 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
699 int err = 0; 699 int err = 0;
700 struct ext3_block_alloc_info *block_i; 700 struct ext3_block_alloc_info *block_i;
701 ext3_fsblk_t current_block; 701 ext3_fsblk_t current_block;
702 struct ext3_inode_info *ei = EXT3_I(inode);
702 703
703 block_i = EXT3_I(inode)->i_block_alloc_info; 704 block_i = ei->i_block_alloc_info;
704 /* 705 /*
705 * If we're splicing into a [td]indirect block (as opposed to the 706 * If we're splicing into a [td]indirect block (as opposed to the
706 * inode) then we need to get write access to the [td]indirect block 707 * inode) then we need to get write access to the [td]indirect block
@@ -741,6 +742,8 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
741 742
742 inode->i_ctime = CURRENT_TIME_SEC; 743 inode->i_ctime = CURRENT_TIME_SEC;
743 ext3_mark_inode_dirty(handle, inode); 744 ext3_mark_inode_dirty(handle, inode);
745 /* ext3_mark_inode_dirty already updated i_sync_tid */
746 atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
744 747
745 /* had we spliced it onto indirect block? */ 748 /* had we spliced it onto indirect block? */
746 if (where->bh) { 749 if (where->bh) {
@@ -1735,6 +1738,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1735 ssize_t ret; 1738 ssize_t ret;
1736 int orphan = 0; 1739 int orphan = 0;
1737 size_t count = iov_length(iov, nr_segs); 1740 size_t count = iov_length(iov, nr_segs);
1741 int retries = 0;
1738 1742
1739 if (rw == WRITE) { 1743 if (rw == WRITE) {
1740 loff_t final_size = offset + count; 1744 loff_t final_size = offset + count;
@@ -1757,9 +1761,12 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1757 } 1761 }
1758 } 1762 }
1759 1763
1764retry:
1760 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 1765 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1761 offset, nr_segs, 1766 offset, nr_segs,
1762 ext3_get_block, NULL); 1767 ext3_get_block, NULL);
1768 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1769 goto retry;
1763 1770
1764 if (orphan) { 1771 if (orphan) {
1765 int err; 1772 int err;
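
The retry label added above re-submits the whole direct IO request when block allocation fails with -ENOSPC; ext3_should_retry_alloc() bounds the retries, presumably allowing a journal commit to free reserved blocks in between. The shape of that loop, sketched with placeholder functions:

#include <errno.h>

#define MAX_ALLOC_RETRIES 3

/* Placeholder for the actual I/O submission. */
static int submit_dio(void) { return -ENOSPC; }

static int dio_with_retry(void)
{
	int retries = 0;
	int ret;

retry:
	ret = submit_dio();
	/* Retry a bounded number of times when the failure was lack of
	 * free blocks; space may be released between attempts. */
	if (ret == -ENOSPC && retries++ < MAX_ALLOC_RETRIES)
		goto retry;
	return ret;
}
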
@@ -2750,6 +2757,8 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2750 struct ext3_inode_info *ei; 2757 struct ext3_inode_info *ei;
2751 struct buffer_head *bh; 2758 struct buffer_head *bh;
2752 struct inode *inode; 2759 struct inode *inode;
2760 journal_t *journal = EXT3_SB(sb)->s_journal;
2761 transaction_t *transaction;
2753 long ret; 2762 long ret;
2754 int block; 2763 int block;
2755 2764
@@ -2827,6 +2836,30 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2827 ei->i_data[block] = raw_inode->i_block[block]; 2836 ei->i_data[block] = raw_inode->i_block[block];
2828 INIT_LIST_HEAD(&ei->i_orphan); 2837 INIT_LIST_HEAD(&ei->i_orphan);
2829 2838
2839 /*
2840 * Set transaction id's of transactions that have to be committed
2841 * to finish f[data]sync. We set them to currently running transaction
2842 * as we cannot be sure that the inode or some of its metadata isn't
2843 * part of the transaction - the inode could have been reclaimed and
2844 * now it is reread from disk.
2845 */
2846 if (journal) {
2847 tid_t tid;
2848
2849 spin_lock(&journal->j_state_lock);
2850 if (journal->j_running_transaction)
2851 transaction = journal->j_running_transaction;
2852 else
2853 transaction = journal->j_committing_transaction;
2854 if (transaction)
2855 tid = transaction->t_tid;
2856 else
2857 tid = journal->j_commit_sequence;
2858 spin_unlock(&journal->j_state_lock);
2859 atomic_set(&ei->i_sync_tid, tid);
2860 atomic_set(&ei->i_datasync_tid, tid);
2861 }
2862
2830 if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 && 2863 if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2831 EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) { 2864 EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2832 /* 2865 /*
@@ -3011,6 +3044,7 @@ again:
3011 err = rc; 3044 err = rc;
3012 ei->i_state &= ~EXT3_STATE_NEW; 3045 ei->i_state &= ~EXT3_STATE_NEW;
3013 3046
3047 atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
3014out_brelse: 3048out_brelse:
3015 brelse (bh); 3049 brelse (bh);
3016 ext3_std_error(inode->i_sb, err); 3050 ext3_std_error(inode->i_sb, err);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7a520a862f49..427496c4767c 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -466,6 +466,8 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
466 return NULL; 466 return NULL;
467 ei->i_block_alloc_info = NULL; 467 ei->i_block_alloc_info = NULL;
468 ei->vfs_inode.i_version = 1; 468 ei->vfs_inode.i_version = 1;
469 atomic_set(&ei->i_datasync_tid, 0);
470 atomic_set(&ei->i_sync_tid, 0);
469 return &ei->vfs_inode; 471 return &ei->vfs_inode;
470} 472}
471 473
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 984ca0cb38c3..8825515eeddd 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -322,6 +322,7 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
322#define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */ 322#define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
323#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */ 323#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
324#define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */ 324#define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */
325#define EXT4_STATE_DIO_UNWRITTEN 0x00000040 /* need convert on dio done*/
325 326
326/* Used to pass group descriptor data when online resize is done */ 327/* Used to pass group descriptor data when online resize is done */
327struct ext4_new_group_input { 328struct ext4_new_group_input {
@@ -743,6 +744,7 @@ struct ext4_inode_info {
743#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ 744#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
744#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ 745#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
745#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ 746#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
747#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
746#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ 748#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
747#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ 749#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */
748#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ 750#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 10539e364283..715264b4bae4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2807,6 +2807,8 @@ fix_extent_len:
 2807 * into three uninitialized extents (at most). After IO completes, the part 2807 * into three uninitialized extents (at most). After IO completes, the part
 2808 * being filled will be converted to initialized by the end_io callback function 2808 * being filled will be converted to initialized by the end_io callback function
2809 * via ext4_convert_unwritten_extents(). 2809 * via ext4_convert_unwritten_extents().
2810 *
2811 * Returns the size of uninitialized extent to be written on success.
2810 */ 2812 */
2811static int ext4_split_unwritten_extents(handle_t *handle, 2813static int ext4_split_unwritten_extents(handle_t *handle,
2812 struct inode *inode, 2814 struct inode *inode,
@@ -2824,7 +2826,6 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2824 unsigned int allocated, ee_len, depth; 2826 unsigned int allocated, ee_len, depth;
2825 ext4_fsblk_t newblock; 2827 ext4_fsblk_t newblock;
2826 int err = 0; 2828 int err = 0;
2827 int ret = 0;
2828 2829
2829 ext_debug("ext4_split_unwritten_extents: inode %lu," 2830 ext_debug("ext4_split_unwritten_extents: inode %lu,"
2830 "iblock %llu, max_blocks %u\n", inode->i_ino, 2831 "iblock %llu, max_blocks %u\n", inode->i_ino,
@@ -2842,12 +2843,12 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2842 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); 2843 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2843 2844
2844 /* 2845 /*
2845 * if the entire unintialized extent length less than 2846 * If the uninitialized extent begins at the same logical
2846 * the size of extent to write, there is no need to split 2847 * block where the write begins, and the write completely
2847 * uninitialized extent 2848 * covers the extent, then we don't need to split it.
2848 */ 2849 */
2849 if (allocated <= max_blocks) 2850 if ((iblock == ee_block) && (allocated <= max_blocks))
2850 return ret; 2851 return allocated;
2851 2852
2852 err = ext4_ext_get_access(handle, inode, path + depth); 2853 err = ext4_ext_get_access(handle, inode, path + depth);
2853 if (err) 2854 if (err)
@@ -3048,12 +3049,18 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3048 ret = ext4_split_unwritten_extents(handle, 3049 ret = ext4_split_unwritten_extents(handle,
3049 inode, path, iblock, 3050 inode, path, iblock,
3050 max_blocks, flags); 3051 max_blocks, flags);
3051 /* flag the io_end struct that we need convert when IO done */ 3052 /*
 3053 * Flag the inode (non-AIO case) or end_io struct (AIO case)
 3054 * that this IO needs conversion to written when IO is
 3055 * completed
3056 */
3052 if (io) 3057 if (io)
3053 io->flag = DIO_AIO_UNWRITTEN; 3058 io->flag = DIO_AIO_UNWRITTEN;
3059 else
3060 EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;
3054 goto out; 3061 goto out;
3055 } 3062 }
3056 /* DIO end_io complete, convert the filled extent to written */ 3063 /* async DIO end_io complete, convert the filled extent to written */
3057 if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { 3064 if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
3058 ret = ext4_convert_unwritten_extents_dio(handle, inode, 3065 ret = ext4_convert_unwritten_extents_dio(handle, inode,
3059 path); 3066 path);
@@ -3295,10 +3302,16 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 3295 * To avoid unnecessary conversion for every aio dio rewrite 3302 * To avoid unnecessary conversion for every aio dio rewrite
 3296 * to the middle of the file, here we flag the IO that really 3303 * to the middle of the file, here we flag the IO that really
 3297 * needs the conversion. 3304 * needs the conversion.
 3298 * 3305 * For the non-async direct IO case, flag the inode state
 3306 * that we need to perform conversion when IO is done.
3299 */ 3307 */
3300 if (io && flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) 3308 if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
3301 io->flag = DIO_AIO_UNWRITTEN; 3309 if (io)
3310 io->flag = DIO_AIO_UNWRITTEN;
3311 else
3312 EXT4_I(inode)->i_state |=
 3313 EXT4_STATE_DIO_UNWRITTEN;
3314 }
3302 } 3315 }
3303 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3316 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3304 if (err) { 3317 if (err) {
@@ -3519,6 +3532,7 @@ retry:
3519 * 3532 *
3520 * This function is called from the direct IO end io call back 3533 * This function is called from the direct IO end io call back
3521 * function, to convert the fallocated extents after IO is completed. 3534 * function, to convert the fallocated extents after IO is completed.
3535 * Returns 0 on success.
3522 */ 3536 */
3523int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, 3537int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3524 loff_t len) 3538 loff_t len)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5c5bc5dafff8..2c8caa51addb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -193,7 +193,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
193 * so before we call here everything must be consistently dirtied against 193 * so before we call here everything must be consistently dirtied against
194 * this transaction. 194 * this transaction.
195 */ 195 */
196 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, 196int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
197 int nblocks) 197 int nblocks)
198{ 198{
199 int ret; 199 int ret;
@@ -209,6 +209,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
209 up_write(&EXT4_I(inode)->i_data_sem); 209 up_write(&EXT4_I(inode)->i_data_sem);
210 ret = ext4_journal_restart(handle, blocks_for_truncate(inode)); 210 ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
211 down_write(&EXT4_I(inode)->i_data_sem); 211 down_write(&EXT4_I(inode)->i_data_sem);
212 ext4_discard_preallocations(inode);
212 213
213 return ret; 214 return ret;
214} 215}
@@ -3445,8 +3446,6 @@ out:
3445 return ret; 3446 return ret;
3446} 3447}
3447 3448
3448/* Maximum number of blocks we map for direct IO at once. */
3449
3450static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock, 3449static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
3451 struct buffer_head *bh_result, int create) 3450 struct buffer_head *bh_result, int create)
3452{ 3451{
@@ -3654,13 +3653,14 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3654 ext4_io_end_t *io_end = iocb->private; 3653 ext4_io_end_t *io_end = iocb->private;
3655 struct workqueue_struct *wq; 3654 struct workqueue_struct *wq;
3656 3655
3656 /* if not async direct IO or dio with 0 bytes write, just return */
3657 if (!io_end || !size)
3658 return;
3659
3657 ext_debug("ext4_end_io_dio(): io_end 0x%p" 3660 ext_debug("ext4_end_io_dio(): io_end 0x%p"
3658 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", 3661 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
3659 iocb->private, io_end->inode->i_ino, iocb, offset, 3662 iocb->private, io_end->inode->i_ino, iocb, offset,
3660 size); 3663 size);
3661 /* if not async direct IO or dio with 0 bytes write, just return */
3662 if (!io_end || !size)
3663 return;
3664 3664
3665 /* if not aio dio with unwritten extents, just free io and return */ 3665 /* if not aio dio with unwritten extents, just free io and return */
3666 if (io_end->flag != DIO_AIO_UNWRITTEN){ 3666 if (io_end->flag != DIO_AIO_UNWRITTEN){
@@ -3771,13 +3771,19 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3771 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 3771 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3772 ext4_free_io_end(iocb->private); 3772 ext4_free_io_end(iocb->private);
3773 iocb->private = NULL; 3773 iocb->private = NULL;
3774 } else if (ret > 0) 3774 } else if (ret > 0 && (EXT4_I(inode)->i_state &
3775 EXT4_STATE_DIO_UNWRITTEN)) {
3776 int err;
3775 /* 3777 /*
3776 * for non AIO case, since the IO is already 3778 * for non AIO case, since the IO is already
 3777 * completed, we could do the conversion right here 3779 * completed, we could do the conversion right here
3778 */ 3780 */
3779 ret = ext4_convert_unwritten_extents(inode, 3781 err = ext4_convert_unwritten_extents(inode,
3780 offset, ret); 3782 offset, ret);
3783 if (err < 0)
3784 ret = err;
3785 EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
3786 }
3781 return ret; 3787 return ret;
3782 } 3788 }
3783 3789
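
Taken together, the extents.c and inode.c hunks above track the pending unwritten-to-written conversion in two places: an async (AIO) direct write flags the io_end so the end_io callback converts the extent later, while a synchronous direct write flags the inode and converts immediately after blockdev_direct_IO() returns. A compact model of that decision, using invented helper names rather than the real ext4 calls:

#include <stdbool.h>

struct io_end { int flag; };			/* stand-in for ext4_io_end_t */
#define DIO_AIO_UNWRITTEN	1
#define STATE_DIO_UNWRITTEN	0x40		/* mirrors EXT4_STATE_DIO_UNWRITTEN */

struct dio_ctx {
	struct io_end *io;		/* non-NULL only for async direct IO */
	unsigned int *inode_state;	/* per-inode state word */
};

/* At block-allocation time: remember that a conversion is owed. */
static void mark_unwritten(struct dio_ctx *c)
{
	if (c->io)
		c->io->flag = DIO_AIO_UNWRITTEN;	/* end_io will convert */
	else
		*c->inode_state |= STATE_DIO_UNWRITTEN;	/* convert inline later */
}

/* After a synchronous direct write returns: decide whether to convert now. */
static bool needs_inline_conversion(const struct dio_ctx *c, long ret)
{
	return ret > 0 && (*c->inode_state & STATE_DIO_UNWRITTEN);
}
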
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 7c8fe80bacdd..6d2c1b897fc7 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1518,12 +1518,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1518 return retval; 1518 return retval;
1519 1519
1520 if (blocks == 1 && !dx_fallback && 1520 if (blocks == 1 && !dx_fallback &&
1521 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { 1521 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
1522 retval = make_indexed_dir(handle, dentry, inode, bh); 1522 return make_indexed_dir(handle, dentry, inode, bh);
1523 if (retval == -ENOSPC)
1524 brelse(bh);
1525 return retval;
1526 }
1527 brelse(bh); 1523 brelse(bh);
1528 } 1524 }
1529 bh = ext4_append(handle, dir, &block, &retval); 1525 bh = ext4_append(handle, dir, &block, &retval);
@@ -1532,10 +1528,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1532 de = (struct ext4_dir_entry_2 *) bh->b_data; 1528 de = (struct ext4_dir_entry_2 *) bh->b_data;
1533 de->inode = 0; 1529 de->inode = 0;
1534 de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); 1530 de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
1535 retval = add_dirent_to_buf(handle, dentry, inode, de, bh); 1531 return add_dirent_to_buf(handle, dentry, inode, de, bh);
1536 if (retval == -ENOSPC)
1537 brelse(bh);
1538 return retval;
1539} 1532}
1540 1533
1541/* 1534/*
@@ -1664,8 +1657,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1664 if (!de) 1657 if (!de)
1665 goto cleanup; 1658 goto cleanup;
1666 err = add_dirent_to_buf(handle, dentry, inode, de, bh); 1659 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
1667 if (err != -ENOSPC) 1660 bh = NULL;
1668 bh = NULL;
1669 goto cleanup; 1661 goto cleanup;
1670 1662
1671journal_error: 1663journal_error:
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 312211ee05af..d4ca92aab514 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1300,9 +1300,11 @@ static int parse_options(char *options, struct super_block *sb,
1300 *journal_devnum = option; 1300 *journal_devnum = option;
1301 break; 1301 break;
1302 case Opt_journal_checksum: 1302 case Opt_journal_checksum:
1303 break; /* Kept for backwards compatibility */ 1303 set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
1304 break;
1304 case Opt_journal_async_commit: 1305 case Opt_journal_async_commit:
1305 set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT); 1306 set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
1307 set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
1306 break; 1308 break;
1307 case Opt_noload: 1309 case Opt_noload:
1308 set_opt(sbi->s_mount_opt, NOLOAD); 1310 set_opt(sbi->s_mount_opt, NOLOAD);
@@ -2759,14 +2761,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2759 goto failed_mount4; 2761 goto failed_mount4;
2760 } 2762 }
2761 2763
2762 jbd2_journal_set_features(sbi->s_journal, 2764 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
2763 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0); 2765 jbd2_journal_set_features(sbi->s_journal,
2764 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) 2766 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
2765 jbd2_journal_set_features(sbi->s_journal, 0, 0,
2766 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 2767 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2767 else 2768 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
2769 jbd2_journal_set_features(sbi->s_journal,
2770 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
2768 jbd2_journal_clear_features(sbi->s_journal, 0, 0, 2771 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
2769 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 2772 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2773 } else {
2774 jbd2_journal_clear_features(sbi->s_journal,
2775 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
2776 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2777 }
2770 2778
2771 /* We have now updated the journal if required, so we can 2779 /* We have now updated the journal if required, so we can
2772 * validate the data journaling mode. */ 2780 * validate the data journaling mode. */
diff --git a/fs/fcntl.c b/fs/fcntl.c
index fc089f2f7f56..2cf93ec40a67 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -284,7 +284,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
284 type = PIDTYPE_PID; 284 type = PIDTYPE_PID;
285 break; 285 break;
286 286
287 case F_OWNER_GID: 287 case F_OWNER_PGRP:
288 type = PIDTYPE_PGID; 288 type = PIDTYPE_PGID;
289 break; 289 break;
290 290
@@ -321,7 +321,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
321 break; 321 break;
322 322
323 case PIDTYPE_PGID: 323 case PIDTYPE_PGID:
324 owner.type = F_OWNER_GID; 324 owner.type = F_OWNER_PGRP;
325 break; 325 break;
326 326
327 default: 327 default:
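
The fcntl.c fix above makes F_SETOWN_EX/F_GETOWN_EX use the F_OWNER_PGRP constant (instead of the misnamed F_OWNER_GID) when SIGIO/SIGURG ownership is directed at a process group. Assuming a kernel and C library that expose the *_EX interface, user-space usage looks roughly like this:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct f_owner_ex owner = {
		.type = F_OWNER_PGRP,	/* deliver SIGIO to our process group */
		.pid  = getpgrp(),
	};

	/* Direct SIGIO/SIGURG for stdin's descriptor at the process group. */
	if (fcntl(STDIN_FILENO, F_SETOWN_EX, &owner) == -1)
		perror("F_SETOWN_EX");

	if (fcntl(STDIN_FILENO, F_GETOWN_EX, &owner) == 0)
		printf("owner type %d, pid %d\n", owner.type, (int)owner.pid);
	return 0;
}
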
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index 9bbb8ce7bea0..864dac20a242 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -54,3 +54,10 @@ config FSCACHE_DEBUG
54 enabled by setting bits in /sys/modules/fscache/parameter/debug. 54 enabled by setting bits in /sys/modules/fscache/parameter/debug.
55 55
56 See Documentation/filesystems/caching/fscache.txt for more information. 56 See Documentation/filesystems/caching/fscache.txt for more information.
57
58config FSCACHE_OBJECT_LIST
59 bool "Maintain global object list for debugging purposes"
60 depends on FSCACHE && PROC_FS
61 help
62 Maintain a global list of active fscache objects that can be
63 retrieved through /proc/fs/fscache/objects for debugging purposes
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
index 91571b95aacc..6d561531cb36 100644
--- a/fs/fscache/Makefile
+++ b/fs/fscache/Makefile
@@ -15,5 +15,6 @@ fscache-y := \
15fscache-$(CONFIG_PROC_FS) += proc.o 15fscache-$(CONFIG_PROC_FS) += proc.o
16fscache-$(CONFIG_FSCACHE_STATS) += stats.o 16fscache-$(CONFIG_FSCACHE_STATS) += stats.o
17fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o 17fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
18fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
18 19
19obj-$(CONFIG_FSCACHE) := fscache.o 20obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index e21985bbb1fb..6a3c48abd677 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache,
263 spin_lock(&cache->object_list_lock); 263 spin_lock(&cache->object_list_lock);
264 list_add_tail(&ifsdef->cache_link, &cache->object_list); 264 list_add_tail(&ifsdef->cache_link, &cache->object_list);
265 spin_unlock(&cache->object_list_lock); 265 spin_unlock(&cache->object_list_lock);
266 fscache_objlist_add(ifsdef);
266 267
267 /* add the cache's netfs definition index object to the top level index 268 /* add the cache's netfs definition index object to the top level index
268 * cookie as a known backing object */ 269 * cookie as a known backing object */
@@ -380,11 +381,15 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
380 381
381 /* make sure all pages pinned by operations on behalf of the netfs are 382 /* make sure all pages pinned by operations on behalf of the netfs are
382 * written to disk */ 383 * written to disk */
384 fscache_stat(&fscache_n_cop_sync_cache);
383 cache->ops->sync_cache(cache); 385 cache->ops->sync_cache(cache);
386 fscache_stat_d(&fscache_n_cop_sync_cache);
384 387
385 /* dissociate all the netfs pages backed by this cache from the block 388 /* dissociate all the netfs pages backed by this cache from the block
386 * mappings in the cache */ 389 * mappings in the cache */
390 fscache_stat(&fscache_n_cop_dissociate_pages);
387 cache->ops->dissociate_pages(cache); 391 cache->ops->dissociate_pages(cache);
392 fscache_stat_d(&fscache_n_cop_dissociate_pages);
388 393
389 /* we now have to destroy all the active objects pertaining to this 394 /* we now have to destroy all the active objects pertaining to this
390 * cache - which we do by passing them off to thread pool to be 395 * cache - which we do by passing them off to thread pool to be
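
The cache.c changes above (and the matching ones in cookie.c below) bracket each call into the cache backend with fscache_stat() before and fscache_stat_d() after, so the counter reports how many of that operation are currently in flight rather than a running total. A minimal illustration of the pattern with a plain C11 atomic (the backend call is a placeholder, not the fscache API):

#include <stdatomic.h>

static atomic_int n_cop_sync_cache;	/* gauge: ops currently executing */

static void backend_sync_cache(void)
{
	/* placeholder for cache->ops->sync_cache() */
}

static void sync_cache_counted(void)
{
	atomic_fetch_add(&n_cop_sync_cache, 1);	/* entering the backend op */
	backend_sync_cache();
	atomic_fetch_sub(&n_cop_sync_cache, 1);	/* op no longer in flight */
}
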
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 72fd18f6c71f..990535071a8a 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie)
36 36
37 memset(cookie, 0, sizeof(*cookie)); 37 memset(cookie, 0, sizeof(*cookie));
38 spin_lock_init(&cookie->lock); 38 spin_lock_init(&cookie->lock);
39 spin_lock_init(&cookie->stores_lock);
39 INIT_HLIST_HEAD(&cookie->backing_objects); 40 INIT_HLIST_HEAD(&cookie->backing_objects);
40} 41}
41 42
@@ -102,7 +103,9 @@ struct fscache_cookie *__fscache_acquire_cookie(
102 cookie->netfs_data = netfs_data; 103 cookie->netfs_data = netfs_data;
103 cookie->flags = 0; 104 cookie->flags = 0;
104 105
105 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS); 106 /* radix tree insertion won't use the preallocation pool unless it's
107 * told it may not wait */
108 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);
106 109
107 switch (cookie->def->type) { 110 switch (cookie->def->type) {
108 case FSCACHE_COOKIE_TYPE_INDEX: 111 case FSCACHE_COOKIE_TYPE_INDEX:
@@ -249,7 +252,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
249 252
250 /* ask the cache to allocate an object (we may end up with duplicate 253 /* ask the cache to allocate an object (we may end up with duplicate
251 * objects at this stage, but we sort that out later) */ 254 * objects at this stage, but we sort that out later) */
255 fscache_stat(&fscache_n_cop_alloc_object);
252 object = cache->ops->alloc_object(cache, cookie); 256 object = cache->ops->alloc_object(cache, cookie);
257 fscache_stat_d(&fscache_n_cop_alloc_object);
253 if (IS_ERR(object)) { 258 if (IS_ERR(object)) {
254 fscache_stat(&fscache_n_object_no_alloc); 259 fscache_stat(&fscache_n_object_no_alloc);
255 ret = PTR_ERR(object); 260 ret = PTR_ERR(object);
@@ -270,8 +275,11 @@ static int fscache_alloc_object(struct fscache_cache *cache,
270 /* only attach if we managed to allocate all we needed, otherwise 275 /* only attach if we managed to allocate all we needed, otherwise
271 * discard the object we just allocated and instead use the one 276 * discard the object we just allocated and instead use the one
272 * attached to the cookie */ 277 * attached to the cookie */
273 if (fscache_attach_object(cookie, object) < 0) 278 if (fscache_attach_object(cookie, object) < 0) {
279 fscache_stat(&fscache_n_cop_put_object);
274 cache->ops->put_object(object); 280 cache->ops->put_object(object);
281 fscache_stat_d(&fscache_n_cop_put_object);
282 }
275 283
276 _leave(" = 0"); 284 _leave(" = 0");
277 return 0; 285 return 0;
@@ -287,7 +295,9 @@ object_already_extant:
287 return 0; 295 return 0;
288 296
289error_put: 297error_put:
298 fscache_stat(&fscache_n_cop_put_object);
290 cache->ops->put_object(object); 299 cache->ops->put_object(object);
300 fscache_stat_d(&fscache_n_cop_put_object);
291error: 301error:
292 _leave(" = %d", ret); 302 _leave(" = %d", ret);
293 return ret; 303 return ret;
@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
349 object->cookie = cookie; 359 object->cookie = cookie;
350 atomic_inc(&cookie->usage); 360 atomic_inc(&cookie->usage);
351 hlist_add_head(&object->cookie_link, &cookie->backing_objects); 361 hlist_add_head(&object->cookie_link, &cookie->backing_objects);
362
363 fscache_objlist_add(object);
352 ret = 0; 364 ret = 0;
353 365
354cant_attach_object: 366cant_attach_object:
@@ -403,6 +415,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
403 unsigned long event; 415 unsigned long event;
404 416
405 fscache_stat(&fscache_n_relinquishes); 417 fscache_stat(&fscache_n_relinquishes);
418 if (retire)
419 fscache_stat(&fscache_n_relinquishes_retire);
406 420
407 if (!cookie) { 421 if (!cookie) {
408 fscache_stat(&fscache_n_relinquishes_null); 422 fscache_stat(&fscache_n_relinquishes_null);
@@ -428,12 +442,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
428 442
429 event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE; 443 event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
430 444
431 /* detach pointers back to the netfs */
432 spin_lock(&cookie->lock); 445 spin_lock(&cookie->lock);
433 446
434 cookie->netfs_data = NULL;
435 cookie->def = NULL;
436
437 /* break links with all the active objects */ 447 /* break links with all the active objects */
438 while (!hlist_empty(&cookie->backing_objects)) { 448 while (!hlist_empty(&cookie->backing_objects)) {
439 object = hlist_entry(cookie->backing_objects.first, 449 object = hlist_entry(cookie->backing_objects.first,
@@ -456,6 +466,10 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
456 BUG(); 466 BUG();
457 } 467 }
458 468
469 /* detach pointers back to the netfs */
470 cookie->netfs_data = NULL;
471 cookie->def = NULL;
472
459 spin_unlock(&cookie->lock); 473 spin_unlock(&cookie->lock);
460 474
461 if (cookie->parent) { 475 if (cookie->parent) {
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 1c341304621f..edd7434ab6e5 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -17,6 +17,7 @@
17 * - cache->object_list_lock 17 * - cache->object_list_lock
18 * - object->lock 18 * - object->lock
19 * - object->parent->lock 19 * - object->parent->lock
20 * - cookie->stores_lock
20 * - fscache_thread_lock 21 * - fscache_thread_lock
21 * 22 *
22 */ 23 */
@@ -88,17 +89,31 @@ extern int fscache_wait_bit_interruptible(void *);
88/* 89/*
89 * object.c 90 * object.c
90 */ 91 */
92extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
93
91extern void fscache_withdrawing_object(struct fscache_cache *, 94extern void fscache_withdrawing_object(struct fscache_cache *,
92 struct fscache_object *); 95 struct fscache_object *);
93extern void fscache_enqueue_object(struct fscache_object *); 96extern void fscache_enqueue_object(struct fscache_object *);
94 97
95/* 98/*
99 * object-list.c
100 */
101#ifdef CONFIG_FSCACHE_OBJECT_LIST
102extern const struct file_operations fscache_objlist_fops;
103
104extern void fscache_objlist_add(struct fscache_object *);
105#else
106#define fscache_objlist_add(object) do {} while(0)
107#endif
108
109/*
96 * operation.c 110 * operation.c
97 */ 111 */
98extern int fscache_submit_exclusive_op(struct fscache_object *, 112extern int fscache_submit_exclusive_op(struct fscache_object *,
99 struct fscache_operation *); 113 struct fscache_operation *);
100extern int fscache_submit_op(struct fscache_object *, 114extern int fscache_submit_op(struct fscache_object *,
101 struct fscache_operation *); 115 struct fscache_operation *);
116extern int fscache_cancel_op(struct fscache_operation *);
102extern void fscache_abort_object(struct fscache_object *); 117extern void fscache_abort_object(struct fscache_object *);
103extern void fscache_start_operations(struct fscache_object *); 118extern void fscache_start_operations(struct fscache_object *);
104extern void fscache_operation_gc(struct work_struct *); 119extern void fscache_operation_gc(struct work_struct *);
@@ -127,6 +142,8 @@ extern atomic_t fscache_n_op_enqueue;
127extern atomic_t fscache_n_op_deferred_release; 142extern atomic_t fscache_n_op_deferred_release;
128extern atomic_t fscache_n_op_release; 143extern atomic_t fscache_n_op_release;
129extern atomic_t fscache_n_op_gc; 144extern atomic_t fscache_n_op_gc;
145extern atomic_t fscache_n_op_cancelled;
146extern atomic_t fscache_n_op_rejected;
130 147
131extern atomic_t fscache_n_attr_changed; 148extern atomic_t fscache_n_attr_changed;
132extern atomic_t fscache_n_attr_changed_ok; 149extern atomic_t fscache_n_attr_changed_ok;
@@ -138,6 +155,8 @@ extern atomic_t fscache_n_allocs;
138extern atomic_t fscache_n_allocs_ok; 155extern atomic_t fscache_n_allocs_ok;
139extern atomic_t fscache_n_allocs_wait; 156extern atomic_t fscache_n_allocs_wait;
140extern atomic_t fscache_n_allocs_nobufs; 157extern atomic_t fscache_n_allocs_nobufs;
158extern atomic_t fscache_n_allocs_intr;
159extern atomic_t fscache_n_allocs_object_dead;
141extern atomic_t fscache_n_alloc_ops; 160extern atomic_t fscache_n_alloc_ops;
142extern atomic_t fscache_n_alloc_op_waits; 161extern atomic_t fscache_n_alloc_op_waits;
143 162
@@ -148,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata;
148extern atomic_t fscache_n_retrievals_nobufs; 167extern atomic_t fscache_n_retrievals_nobufs;
149extern atomic_t fscache_n_retrievals_intr; 168extern atomic_t fscache_n_retrievals_intr;
150extern atomic_t fscache_n_retrievals_nomem; 169extern atomic_t fscache_n_retrievals_nomem;
170extern atomic_t fscache_n_retrievals_object_dead;
151extern atomic_t fscache_n_retrieval_ops; 171extern atomic_t fscache_n_retrieval_ops;
152extern atomic_t fscache_n_retrieval_op_waits; 172extern atomic_t fscache_n_retrieval_op_waits;
153 173
@@ -158,6 +178,14 @@ extern atomic_t fscache_n_stores_nobufs;
158extern atomic_t fscache_n_stores_oom; 178extern atomic_t fscache_n_stores_oom;
159extern atomic_t fscache_n_store_ops; 179extern atomic_t fscache_n_store_ops;
160extern atomic_t fscache_n_store_calls; 180extern atomic_t fscache_n_store_calls;
181extern atomic_t fscache_n_store_pages;
182extern atomic_t fscache_n_store_radix_deletes;
183extern atomic_t fscache_n_store_pages_over_limit;
184
185extern atomic_t fscache_n_store_vmscan_not_storing;
186extern atomic_t fscache_n_store_vmscan_gone;
187extern atomic_t fscache_n_store_vmscan_busy;
188extern atomic_t fscache_n_store_vmscan_cancelled;
161 189
162extern atomic_t fscache_n_marks; 190extern atomic_t fscache_n_marks;
163extern atomic_t fscache_n_uncaches; 191extern atomic_t fscache_n_uncaches;
@@ -176,6 +204,7 @@ extern atomic_t fscache_n_updates_run;
176extern atomic_t fscache_n_relinquishes; 204extern atomic_t fscache_n_relinquishes;
177extern atomic_t fscache_n_relinquishes_null; 205extern atomic_t fscache_n_relinquishes_null;
178extern atomic_t fscache_n_relinquishes_waitcrt; 206extern atomic_t fscache_n_relinquishes_waitcrt;
207extern atomic_t fscache_n_relinquishes_retire;
179 208
180extern atomic_t fscache_n_cookie_index; 209extern atomic_t fscache_n_cookie_index;
181extern atomic_t fscache_n_cookie_data; 210extern atomic_t fscache_n_cookie_data;
@@ -186,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc;
186extern atomic_t fscache_n_object_lookups; 215extern atomic_t fscache_n_object_lookups;
187extern atomic_t fscache_n_object_lookups_negative; 216extern atomic_t fscache_n_object_lookups_negative;
188extern atomic_t fscache_n_object_lookups_positive; 217extern atomic_t fscache_n_object_lookups_positive;
218extern atomic_t fscache_n_object_lookups_timed_out;
189extern atomic_t fscache_n_object_created; 219extern atomic_t fscache_n_object_created;
190extern atomic_t fscache_n_object_avail; 220extern atomic_t fscache_n_object_avail;
191extern atomic_t fscache_n_object_dead; 221extern atomic_t fscache_n_object_dead;
@@ -195,15 +225,41 @@ extern atomic_t fscache_n_checkaux_okay;
195extern atomic_t fscache_n_checkaux_update; 225extern atomic_t fscache_n_checkaux_update;
196extern atomic_t fscache_n_checkaux_obsolete; 226extern atomic_t fscache_n_checkaux_obsolete;
197 227
228extern atomic_t fscache_n_cop_alloc_object;
229extern atomic_t fscache_n_cop_lookup_object;
230extern atomic_t fscache_n_cop_lookup_complete;
231extern atomic_t fscache_n_cop_grab_object;
232extern atomic_t fscache_n_cop_update_object;
233extern atomic_t fscache_n_cop_drop_object;
234extern atomic_t fscache_n_cop_put_object;
235extern atomic_t fscache_n_cop_sync_cache;
236extern atomic_t fscache_n_cop_attr_changed;
237extern atomic_t fscache_n_cop_read_or_alloc_page;
238extern atomic_t fscache_n_cop_read_or_alloc_pages;
239extern atomic_t fscache_n_cop_allocate_page;
240extern atomic_t fscache_n_cop_allocate_pages;
241extern atomic_t fscache_n_cop_write_page;
242extern atomic_t fscache_n_cop_uncache_page;
243extern atomic_t fscache_n_cop_dissociate_pages;
244
198static inline void fscache_stat(atomic_t *stat) 245static inline void fscache_stat(atomic_t *stat)
199{ 246{
200 atomic_inc(stat); 247 atomic_inc(stat);
201} 248}
202 249
250static inline void fscache_stat_d(atomic_t *stat)
251{
252 atomic_dec(stat);
253}
254
255#define __fscache_stat(stat) (stat)
256
203extern const struct file_operations fscache_stats_fops; 257extern const struct file_operations fscache_stats_fops;
204#else 258#else
205 259
260#define __fscache_stat(stat) (NULL)
206#define fscache_stat(stat) do {} while (0) 261#define fscache_stat(stat) do {} while (0)
262#define fscache_stat_d(stat) do {} while (0)
207#endif 263#endif
208 264
209/* 265/*
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 4de41b597499..add6bdb53f04 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
48{ 48{
49 int ret; 49 int ret;
50 50
51 ret = slow_work_register_user(); 51 ret = slow_work_register_user(THIS_MODULE);
52 if (ret < 0) 52 if (ret < 0)
53 goto error_slow_work; 53 goto error_slow_work;
54 54
@@ -80,7 +80,7 @@ error_kobj:
80error_cookie_jar: 80error_cookie_jar:
81 fscache_proc_cleanup(); 81 fscache_proc_cleanup();
82error_proc: 82error_proc:
83 slow_work_unregister_user(); 83 slow_work_unregister_user(THIS_MODULE);
84error_slow_work: 84error_slow_work:
85 return ret; 85 return ret;
86} 86}
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
97 kobject_put(fscache_root); 97 kobject_put(fscache_root);
98 kmem_cache_destroy(fscache_cookie_jar); 98 kmem_cache_destroy(fscache_cookie_jar);
99 fscache_proc_cleanup(); 99 fscache_proc_cleanup();
100 slow_work_unregister_user(); 100 slow_work_unregister_user(THIS_MODULE);
101 printk(KERN_NOTICE "FS-Cache: Unloaded\n"); 101 printk(KERN_NOTICE "FS-Cache: Unloaded\n");
102} 102}
103 103
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
new file mode 100644
index 000000000000..e590242fa41a
--- /dev/null
+++ b/fs/fscache/object-list.c
@@ -0,0 +1,432 @@
1/* Global fscache object list maintainer and viewer
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define FSCACHE_DEBUG_LEVEL COOKIE
13#include <linux/module.h>
14#include <linux/seq_file.h>
15#include <linux/key.h>
16#include <keys/user-type.h>
17#include "internal.h"
18
19static struct rb_root fscache_object_list;
20static DEFINE_RWLOCK(fscache_object_list_lock);
21
22struct fscache_objlist_data {
23 unsigned long config; /* display configuration */
24#define FSCACHE_OBJLIST_CONFIG_KEY 0x00000001 /* show object keys */
25#define FSCACHE_OBJLIST_CONFIG_AUX 0x00000002 /* show object auxdata */
26#define FSCACHE_OBJLIST_CONFIG_COOKIE 0x00000004 /* show objects with cookies */
27#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE 0x00000008 /* show objects without cookies */
28#define FSCACHE_OBJLIST_CONFIG_BUSY 0x00000010 /* show busy objects */
29#define FSCACHE_OBJLIST_CONFIG_IDLE 0x00000020 /* show idle objects */
30#define FSCACHE_OBJLIST_CONFIG_PENDWR 0x00000040 /* show objects with pending writes */
31#define FSCACHE_OBJLIST_CONFIG_NOPENDWR 0x00000080 /* show objects without pending writes */
32#define FSCACHE_OBJLIST_CONFIG_READS 0x00000100 /* show objects with active reads */
33#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */
34#define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */
 35#define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without events */
36#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with slow work */
37#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without slow work */
38
39 u8 buf[512]; /* key and aux data buffer */
40};
41
42/*
43 * Add an object to the object list
44 * - we use the address of the fscache_object structure as the key into the
45 * tree
46 */
47void fscache_objlist_add(struct fscache_object *obj)
48{
49 struct fscache_object *xobj;
50 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
51
52 write_lock(&fscache_object_list_lock);
53
54 while (*p) {
55 parent = *p;
56 xobj = rb_entry(parent, struct fscache_object, objlist_link);
57
58 if (obj < xobj)
59 p = &(*p)->rb_left;
60 else if (obj > xobj)
61 p = &(*p)->rb_right;
62 else
63 BUG();
64 }
65
66 rb_link_node(&obj->objlist_link, parent, p);
67 rb_insert_color(&obj->objlist_link, &fscache_object_list);
68
69 write_unlock(&fscache_object_list_lock);
70}
71
72/**
73 * fscache_object_destroy - Note that a cache object is about to be destroyed
74 * @object: The object to be destroyed
75 *
76 * Note the imminent destruction and deallocation of a cache object record.
77 */
78void fscache_object_destroy(struct fscache_object *obj)
79{
80 write_lock(&fscache_object_list_lock);
81
82 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
83 rb_erase(&obj->objlist_link, &fscache_object_list);
84
85 write_unlock(&fscache_object_list_lock);
86}
87EXPORT_SYMBOL(fscache_object_destroy);
88
89/*
90 * find the object in the tree on or after the specified index
91 */
92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
93{
94 struct fscache_object *pobj, *obj, *minobj = NULL;
95 struct rb_node *p;
96 unsigned long pos;
97
98 if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
99 return NULL;
100 pos = *_pos;
101
102 /* banners (can't represent line 0 by pos 0 as that would involve
103 * returning a NULL pointer) */
104 if (pos == 0)
105 return (struct fscache_object *) ++(*_pos);
106 if (pos < 3)
107 return (struct fscache_object *)pos;
108
109 pobj = (struct fscache_object *)pos;
110 p = fscache_object_list.rb_node;
111 while (p) {
112 obj = rb_entry(p, struct fscache_object, objlist_link);
113 if (pobj < obj) {
114 if (!minobj || minobj > obj)
115 minobj = obj;
116 p = p->rb_left;
117 } else if (pobj > obj) {
118 p = p->rb_right;
119 } else {
120 minobj = obj;
121 break;
122 }
123 obj = NULL;
124 }
125
126 if (!minobj)
127 *_pos = (unsigned long) ERR_PTR(-ENOENT);
128 else if (minobj != obj)
129 *_pos = (unsigned long) minobj;
130 return minobj;
131}
132
133/*
134 * set up the iterator to start reading from the first line
135 */
136static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
137 __acquires(&fscache_object_list_lock)
138{
139 read_lock(&fscache_object_list_lock);
140 return fscache_objlist_lookup(_pos);
141}
142
143/*
144 * move to the next line
145 */
146static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
147{
148 (*_pos)++;
149 return fscache_objlist_lookup(_pos);
150}
151
152/*
153 * clean up after reading
154 */
155static void fscache_objlist_stop(struct seq_file *m, void *v)
156 __releases(&fscache_object_list_lock)
157{
158 read_unlock(&fscache_object_list_lock);
159}
160
161/*
162 * display an object
163 */
164static int fscache_objlist_show(struct seq_file *m, void *v)
165{
166 struct fscache_objlist_data *data = m->private;
167 struct fscache_object *obj = v;
168 unsigned long config = data->config;
169 uint16_t keylen, auxlen;
170 char _type[3], *type;
171 bool no_cookie;
172 u8 *buf = data->buf, *p;
173
174 if ((unsigned long) v == 1) {
175 seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS"
176 " EM EV F S"
177 " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
178 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
179 FSCACHE_OBJLIST_CONFIG_AUX))
180 seq_puts(m, " ");
181 if (config & FSCACHE_OBJLIST_CONFIG_KEY)
182 seq_puts(m, "OBJECT_KEY");
183 if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
184 FSCACHE_OBJLIST_CONFIG_AUX)) ==
185 (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
186 seq_puts(m, ", ");
187 if (config & FSCACHE_OBJLIST_CONFIG_AUX)
188 seq_puts(m, "AUX_DATA");
189 seq_puts(m, "\n");
190 return 0;
191 }
192
193 if ((unsigned long) v == 2) {
194 seq_puts(m, "======== ======== ==== ===== === === === == ====="
195 " == == = ="
196 " | ================ == == ================");
197 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
198 FSCACHE_OBJLIST_CONFIG_AUX))
199 seq_puts(m, " ================");
200 seq_puts(m, "\n");
201 return 0;
202 }
203
204 /* filter out any unwanted objects */
205#define FILTER(criterion, _yes, _no) \
206 do { \
207 unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \
208 unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \
209 if (criterion) { \
210 if (!(config & yes)) \
211 return 0; \
212 } else { \
213 if (!(config & no)) \
214 return 0; \
215 } \
216 } while(0)
217
218 if (~config) {
219 FILTER(obj->cookie,
220 COOKIE, NOCOOKIE);
221 FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
222 obj->n_ops != 0 ||
223 obj->n_obj_ops != 0 ||
224 obj->flags ||
225 !list_empty(&obj->dependents),
226 BUSY, IDLE);
227 FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
228 PENDWR, NOPENDWR);
229 FILTER(atomic_read(&obj->n_reads),
230 READS, NOREADS);
231 FILTER(obj->events & obj->event_mask,
232 EVENTS, NOEVENTS);
233 FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
234 WORK, NOWORK);
235 }
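
For reference, each FILTER() use above expands into a pair of config checks that suppress the line unless the matching filter bit was requested. Hand-expanding the first one, FILTER(obj->cookie, COOKIE, NOCOOKIE), gives roughly:

	do {
		unsigned long yes = FSCACHE_OBJLIST_CONFIG_COOKIE;
		unsigned long no  = FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
		if (obj->cookie) {
			if (!(config & yes))
				return 0;	/* has a cookie, but 'C' not selected */
		} else {
			if (!(config & no))
				return 0;	/* no cookie, but 'c' not selected */
		}
	} while (0);
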
236
237 seq_printf(m,
238 "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
239 obj->debug_id,
240 obj->parent ? obj->parent->debug_id : -1,
241 fscache_object_states_short[obj->state],
242 obj->n_children,
243 obj->n_ops,
244 obj->n_obj_ops,
245 obj->n_in_progress,
246 obj->n_exclusive,
247 atomic_read(&obj->n_reads),
248 obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
249 obj->events,
250 obj->flags,
251 obj->work.flags);
252
253 no_cookie = true;
254 keylen = auxlen = 0;
255 if (obj->cookie) {
256 spin_lock(&obj->lock);
257 if (obj->cookie) {
258 switch (obj->cookie->def->type) {
259 case 0:
260 type = "IX";
261 break;
262 case 1:
263 type = "DT";
264 break;
265 default:
266 sprintf(_type, "%02u",
267 obj->cookie->def->type);
268 type = _type;
269 break;
270 }
271
272 seq_printf(m, "%-16s %s %2lx %16p",
273 obj->cookie->def->name,
274 type,
275 obj->cookie->flags,
276 obj->cookie->netfs_data);
277
278 if (obj->cookie->def->get_key &&
279 config & FSCACHE_OBJLIST_CONFIG_KEY)
280 keylen = obj->cookie->def->get_key(
281 obj->cookie->netfs_data,
282 buf, 400);
283
284 if (obj->cookie->def->get_aux &&
285 config & FSCACHE_OBJLIST_CONFIG_AUX)
286 auxlen = obj->cookie->def->get_aux(
287 obj->cookie->netfs_data,
288 buf + keylen, 512 - keylen);
289
290 no_cookie = false;
291 }
292 spin_unlock(&obj->lock);
293
294 if (!no_cookie && (keylen > 0 || auxlen > 0)) {
295 seq_printf(m, " ");
296 for (p = buf; keylen > 0; keylen--)
297 seq_printf(m, "%02x", *p++);
298 if (auxlen > 0) {
299 if (config & FSCACHE_OBJLIST_CONFIG_KEY)
300 seq_printf(m, ", ");
301 for (; auxlen > 0; auxlen--)
302 seq_printf(m, "%02x", *p++);
303 }
304 }
305 }
306
307 if (no_cookie)
308 seq_printf(m, "<no_cookie>\n");
309 else
310 seq_printf(m, "\n");
311 return 0;
312}
313
314static const struct seq_operations fscache_objlist_ops = {
315 .start = fscache_objlist_start,
316 .stop = fscache_objlist_stop,
317 .next = fscache_objlist_next,
318 .show = fscache_objlist_show,
319};
320
321/*
322 * get the configuration for filtering the list
323 */
324static void fscache_objlist_config(struct fscache_objlist_data *data)
325{
326#ifdef CONFIG_KEYS
327 struct user_key_payload *confkey;
328 unsigned long config;
329 struct key *key;
330 const char *buf;
331 int len;
332
333 key = request_key(&key_type_user, "fscache:objlist", NULL);
334 if (IS_ERR(key))
335 goto no_config;
336
337 config = 0;
338 rcu_read_lock();
339
340 confkey = key->payload.data;
341 buf = confkey->data;
342
343 for (len = confkey->datalen - 1; len >= 0; len--) {
344 switch (buf[len]) {
345 case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY; break;
346 case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX; break;
347 case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE; break;
348 case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE; break;
349 case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY; break;
350 case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE; break;
351 case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR; break;
352 case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR; break;
353 case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS; break;
354 case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS; break;
355 case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK; break;
356 case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK; break;
357 }
358 }
359
360 rcu_read_unlock();
361 key_put(key);
362
363 if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
364 config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
365 if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
366 config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
367 if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
368 config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
369 if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
370 config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
371 if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
372 config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
373 if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
374 config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;
375
376 data->config = config;
377 return;
378
379no_config:
380#endif
381 data->config = ULONG_MAX;
382}
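
The filter configuration comes from a user-defined key named "fscache:objlist" in the caller's keyrings (typically installed beforehand, e.g. with the keyctl(1) utility); each letter in the key payload turns on one filter bit, and any yes/no axis left unspecified defaults to showing both sides. A stand-alone sketch of the same letter-to-bitmask decoding, using invented flag values and covering only a few of the letters for brevity:

	#include <stdio.h>

	/* illustrative flag values only; the kernel defines its own constants */
	#define CFG_KEY		0x0001
	#define CFG_AUX		0x0002
	#define CFG_COOKIE	0x0004
	#define CFG_NOCOOKIE	0x0008
	#define CFG_BUSY	0x0010
	#define CFG_IDLE	0x0020

	static unsigned long parse_config(const char *s)
	{
		unsigned long config = 0;

		for (; *s; s++) {
			switch (*s) {
			case 'K': config |= CFG_KEY;      break;
			case 'A': config |= CFG_AUX;      break;
			case 'C': config |= CFG_COOKIE;   break;
			case 'c': config |= CFG_NOCOOKIE; break;
			case 'B': config |= CFG_BUSY;     break;
			case 'b': config |= CFG_IDLE;     break;
			}
		}

		/* an axis with neither side selected shows both, as in the patch */
		if (!(config & (CFG_COOKIE | CFG_NOCOOKIE)))
			config |= CFG_COOKIE | CFG_NOCOOKIE;
		if (!(config & (CFG_BUSY | CFG_IDLE)))
			config |= CFG_BUSY | CFG_IDLE;

		return config;
	}

	int main(void)
	{
		printf("config(\"KB\") = %#lx\n", parse_config("KB"));
		return 0;
	}
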
383
384/*
385 * open "/proc/fs/fscache/objects" to provide a list of active objects
386 * - can be configured by a user-defined key added to the caller's keyrings
387 */
388static int fscache_objlist_open(struct inode *inode, struct file *file)
389{
390 struct fscache_objlist_data *data;
391 struct seq_file *m;
392 int ret;
393
394 ret = seq_open(file, &fscache_objlist_ops);
395 if (ret < 0)
396 return ret;
397
398 m = file->private_data;
399
400 /* buffer for key extraction */
401 data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL);
402 if (!data) {
403 seq_release(inode, file);
404 return -ENOMEM;
405 }
406
407 /* get the configuration key */
408 fscache_objlist_config(data);
409
410 m->private = data;
411 return 0;
412}
413
414/*
415 * clean up on close
416 */
417static int fscache_objlist_release(struct inode *inode, struct file *file)
418{
419 struct seq_file *m = file->private_data;
420
421 kfree(m->private);
422 m->private = NULL;
423 return seq_release(inode, file);
424}
425
426const struct file_operations fscache_objlist_fops = {
427 .owner = THIS_MODULE,
428 .open = fscache_objlist_open,
429 .read = seq_read,
430 .llseek = seq_lseek,
431 .release = fscache_objlist_release,
432};
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 392a41b1b79d..e513ac599c8e 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -14,9 +14,10 @@
14 14
15#define FSCACHE_DEBUG_LEVEL COOKIE 15#define FSCACHE_DEBUG_LEVEL COOKIE
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/seq_file.h>
17#include "internal.h" 18#include "internal.h"
18 19
19const char *fscache_object_states[] = { 20const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
20 [FSCACHE_OBJECT_INIT] = "OBJECT_INIT", 21 [FSCACHE_OBJECT_INIT] = "OBJECT_INIT",
21 [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP", 22 [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP",
22 [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING", 23 [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING",
@@ -33,9 +34,28 @@ const char *fscache_object_states[] = {
33}; 34};
34EXPORT_SYMBOL(fscache_object_states); 35EXPORT_SYMBOL(fscache_object_states);
35 36
37const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
38 [FSCACHE_OBJECT_INIT] = "INIT",
39 [FSCACHE_OBJECT_LOOKING_UP] = "LOOK",
40 [FSCACHE_OBJECT_CREATING] = "CRTN",
41 [FSCACHE_OBJECT_AVAILABLE] = "AVBL",
42 [FSCACHE_OBJECT_ACTIVE] = "ACTV",
43 [FSCACHE_OBJECT_UPDATING] = "UPDT",
44 [FSCACHE_OBJECT_DYING] = "DYNG",
45 [FSCACHE_OBJECT_LC_DYING] = "LCDY",
46 [FSCACHE_OBJECT_ABORT_INIT] = "ABTI",
47 [FSCACHE_OBJECT_RELEASING] = "RELS",
48 [FSCACHE_OBJECT_RECYCLING] = "RCYC",
49 [FSCACHE_OBJECT_WITHDRAWING] = "WTHD",
50 [FSCACHE_OBJECT_DEAD] = "DEAD",
51};
52
36static void fscache_object_slow_work_put_ref(struct slow_work *); 53static void fscache_object_slow_work_put_ref(struct slow_work *);
37static int fscache_object_slow_work_get_ref(struct slow_work *); 54static int fscache_object_slow_work_get_ref(struct slow_work *);
38static void fscache_object_slow_work_execute(struct slow_work *); 55static void fscache_object_slow_work_execute(struct slow_work *);
56#ifdef CONFIG_SLOW_WORK_PROC
57static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
58#endif
39static void fscache_initialise_object(struct fscache_object *); 59static void fscache_initialise_object(struct fscache_object *);
40static void fscache_lookup_object(struct fscache_object *); 60static void fscache_lookup_object(struct fscache_object *);
41static void fscache_object_available(struct fscache_object *); 61static void fscache_object_available(struct fscache_object *);
@@ -45,9 +65,13 @@ static void fscache_enqueue_dependents(struct fscache_object *);
45static void fscache_dequeue_object(struct fscache_object *); 65static void fscache_dequeue_object(struct fscache_object *);
46 66
47const struct slow_work_ops fscache_object_slow_work_ops = { 67const struct slow_work_ops fscache_object_slow_work_ops = {
68 .owner = THIS_MODULE,
48 .get_ref = fscache_object_slow_work_get_ref, 69 .get_ref = fscache_object_slow_work_get_ref,
49 .put_ref = fscache_object_slow_work_put_ref, 70 .put_ref = fscache_object_slow_work_put_ref,
50 .execute = fscache_object_slow_work_execute, 71 .execute = fscache_object_slow_work_execute,
72#ifdef CONFIG_SLOW_WORK_PROC
73 .desc = fscache_object_slow_work_desc,
74#endif
51}; 75};
52EXPORT_SYMBOL(fscache_object_slow_work_ops); 76EXPORT_SYMBOL(fscache_object_slow_work_ops);
53 77
@@ -81,6 +105,7 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
81static void fscache_object_state_machine(struct fscache_object *object) 105static void fscache_object_state_machine(struct fscache_object *object)
82{ 106{
83 enum fscache_object_state new_state; 107 enum fscache_object_state new_state;
108 struct fscache_cookie *cookie;
84 109
85 ASSERT(object != NULL); 110 ASSERT(object != NULL);
86 111
@@ -120,20 +145,31 @@ static void fscache_object_state_machine(struct fscache_object *object)
120 case FSCACHE_OBJECT_UPDATING: 145 case FSCACHE_OBJECT_UPDATING:
121 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); 146 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
122 fscache_stat(&fscache_n_updates_run); 147 fscache_stat(&fscache_n_updates_run);
148 fscache_stat(&fscache_n_cop_update_object);
123 object->cache->ops->update_object(object); 149 object->cache->ops->update_object(object);
150 fscache_stat_d(&fscache_n_cop_update_object);
124 goto active_transit; 151 goto active_transit;
125 152
126 /* handle an object dying during lookup or creation */ 153 /* handle an object dying during lookup or creation */
127 case FSCACHE_OBJECT_LC_DYING: 154 case FSCACHE_OBJECT_LC_DYING:
128 object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); 155 object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
156 fscache_stat(&fscache_n_cop_lookup_complete);
129 object->cache->ops->lookup_complete(object); 157 object->cache->ops->lookup_complete(object);
158 fscache_stat_d(&fscache_n_cop_lookup_complete);
130 159
131 spin_lock(&object->lock); 160 spin_lock(&object->lock);
132 object->state = FSCACHE_OBJECT_DYING; 161 object->state = FSCACHE_OBJECT_DYING;
133 if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, 162 cookie = object->cookie;
134 &object->cookie->flags)) 163 if (cookie) {
135 wake_up_bit(&object->cookie->flags, 164 if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
136 FSCACHE_COOKIE_CREATING); 165 &cookie->flags))
166 wake_up_bit(&cookie->flags,
167 FSCACHE_COOKIE_LOOKING_UP);
168 if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
169 &cookie->flags))
170 wake_up_bit(&cookie->flags,
171 FSCACHE_COOKIE_CREATING);
172 }
137 spin_unlock(&object->lock); 173 spin_unlock(&object->lock);
138 174
139 fscache_done_parent_op(object); 175 fscache_done_parent_op(object);
@@ -165,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
165 } 201 }
166 spin_unlock(&object->lock); 202 spin_unlock(&object->lock);
167 fscache_enqueue_dependents(object); 203 fscache_enqueue_dependents(object);
204 fscache_start_operations(object);
168 goto terminal_transit; 205 goto terminal_transit;
169 206
170 /* handle an abort during initialisation */ 207 /* handle an abort during initialisation */
@@ -316,14 +353,29 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
316 353
317 _enter("{OBJ%x}", object->debug_id); 354 _enter("{OBJ%x}", object->debug_id);
318 355
319 clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
320
321 start = jiffies; 356 start = jiffies;
322 fscache_object_state_machine(object); 357 fscache_object_state_machine(object);
323 fscache_hist(fscache_objs_histogram, start); 358 fscache_hist(fscache_objs_histogram, start);
324 if (object->events & object->event_mask) 359 if (object->events & object->event_mask)
325 fscache_enqueue_object(object); 360 fscache_enqueue_object(object);
361 clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
362}
363
364/*
365 * describe an object for slow-work debugging
366 */
367#ifdef CONFIG_SLOW_WORK_PROC
368static void fscache_object_slow_work_desc(struct slow_work *work,
369 struct seq_file *m)
370{
371 struct fscache_object *object =
372 container_of(work, struct fscache_object, work);
373
374 seq_printf(m, "FSC: OBJ%x: %s",
375 object->debug_id,
376 fscache_object_states_short[object->state]);
326} 377}
378#endif
327 379
328/* 380/*
329 * initialise an object 381 * initialise an object
@@ -376,7 +428,9 @@ static void fscache_initialise_object(struct fscache_object *object)
376 * binding on to us, so we need to make sure we don't 428 * binding on to us, so we need to make sure we don't
377 * add ourself to the list multiple times */ 429 * add ourself to the list multiple times */
378 if (list_empty(&object->dep_link)) { 430 if (list_empty(&object->dep_link)) {
431 fscache_stat(&fscache_n_cop_grab_object);
379 object->cache->ops->grab_object(object); 432 object->cache->ops->grab_object(object);
433 fscache_stat_d(&fscache_n_cop_grab_object);
380 list_add(&object->dep_link, 434 list_add(&object->dep_link,
381 &parent->dependents); 435 &parent->dependents);
382 436
@@ -414,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object)
414{ 468{
415 struct fscache_cookie *cookie = object->cookie; 469 struct fscache_cookie *cookie = object->cookie;
416 struct fscache_object *parent; 470 struct fscache_object *parent;
471 int ret;
417 472
418 _enter(""); 473 _enter("");
419 474
@@ -438,11 +493,20 @@ static void fscache_lookup_object(struct fscache_object *object)
438 object->cache->tag->name); 493 object->cache->tag->name);
439 494
440 fscache_stat(&fscache_n_object_lookups); 495 fscache_stat(&fscache_n_object_lookups);
441 object->cache->ops->lookup_object(object); 496 fscache_stat(&fscache_n_cop_lookup_object);
497 ret = object->cache->ops->lookup_object(object);
498 fscache_stat_d(&fscache_n_cop_lookup_object);
442 499
443 if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events)) 500 if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
444 set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); 501 set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
445 502
503 if (ret == -ETIMEDOUT) {
504 /* probably stuck behind another object, so move this one to
505 * the back of the queue */
506 fscache_stat(&fscache_n_object_lookups_timed_out);
507 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
508 }
509
446 _leave(""); 510 _leave("");
447} 511}
448 512
@@ -546,7 +610,8 @@ static void fscache_object_available(struct fscache_object *object)
546 610
547 spin_lock(&object->lock); 611 spin_lock(&object->lock);
548 612
549 if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags)) 613 if (object->cookie &&
614 test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
550 wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING); 615 wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
551 616
552 fscache_done_parent_op(object); 617 fscache_done_parent_op(object);
@@ -562,7 +627,9 @@ static void fscache_object_available(struct fscache_object *object)
562 } 627 }
563 spin_unlock(&object->lock); 628 spin_unlock(&object->lock);
564 629
630 fscache_stat(&fscache_n_cop_lookup_complete);
565 object->cache->ops->lookup_complete(object); 631 object->cache->ops->lookup_complete(object);
632 fscache_stat_d(&fscache_n_cop_lookup_complete);
566 fscache_enqueue_dependents(object); 633 fscache_enqueue_dependents(object);
567 634
568 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); 635 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
@@ -581,11 +648,16 @@ static void fscache_drop_object(struct fscache_object *object)
581 648
582 _enter("{OBJ%x,%d}", object->debug_id, object->n_children); 649 _enter("{OBJ%x,%d}", object->debug_id, object->n_children);
583 650
651 ASSERTCMP(object->cookie, ==, NULL);
652 ASSERT(hlist_unhashed(&object->cookie_link));
653
584 spin_lock(&cache->object_list_lock); 654 spin_lock(&cache->object_list_lock);
585 list_del_init(&object->cache_link); 655 list_del_init(&object->cache_link);
586 spin_unlock(&cache->object_list_lock); 656 spin_unlock(&cache->object_list_lock);
587 657
658 fscache_stat(&fscache_n_cop_drop_object);
588 cache->ops->drop_object(object); 659 cache->ops->drop_object(object);
660 fscache_stat_d(&fscache_n_cop_drop_object);
589 661
590 if (parent) { 662 if (parent) {
591 _debug("release parent OBJ%x {%d}", 663 _debug("release parent OBJ%x {%d}",
@@ -600,7 +672,9 @@ static void fscache_drop_object(struct fscache_object *object)
600 } 672 }
601 673
602 /* this just shifts the object release to the slow work processor */ 674 /* this just shifts the object release to the slow work processor */
675 fscache_stat(&fscache_n_cop_put_object);
603 object->cache->ops->put_object(object); 676 object->cache->ops->put_object(object);
677 fscache_stat_d(&fscache_n_cop_put_object);
604 678
605 _leave(""); 679 _leave("");
606} 680}
@@ -690,8 +764,12 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
690{ 764{
691 struct fscache_object *object = 765 struct fscache_object *object =
692 container_of(work, struct fscache_object, work); 766 container_of(work, struct fscache_object, work);
767 int ret;
693 768
694 return object->cache->ops->grab_object(object) ? 0 : -EAGAIN; 769 fscache_stat(&fscache_n_cop_grab_object);
770 ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
771 fscache_stat_d(&fscache_n_cop_grab_object);
772 return ret;
695} 773}
696 774
697/* 775/*
@@ -702,7 +780,9 @@ static void fscache_object_slow_work_put_ref(struct slow_work *work)
702 struct fscache_object *object = 780 struct fscache_object *object =
703 container_of(work, struct fscache_object, work); 781 container_of(work, struct fscache_object, work);
704 782
705 return object->cache->ops->put_object(object); 783 fscache_stat(&fscache_n_cop_put_object);
784 object->cache->ops->put_object(object);
785 fscache_stat_d(&fscache_n_cop_put_object);
706} 786}
707 787
708/* 788/*
@@ -739,7 +819,9 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
739 819
740 /* sort onto appropriate lists */ 820 /* sort onto appropriate lists */
741 fscache_enqueue_object(dep); 821 fscache_enqueue_object(dep);
822 fscache_stat(&fscache_n_cop_put_object);
742 dep->cache->ops->put_object(dep); 823 dep->cache->ops->put_object(dep);
824 fscache_stat_d(&fscache_n_cop_put_object);
743 825
744 if (!list_empty(&object->dependents)) 826 if (!list_empty(&object->dependents))
745 cond_resched_lock(&object->lock); 827 cond_resched_lock(&object->lock);
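
A recurring change through the object.c hunks above is bracketing every call into the cache backend with a paired increment/decrement of an fscache_n_cop_* counter (fscache_stat() before, fscache_stat_d() after), so that the counter's value at any moment approximates how many such calls are in flight. A toy user-space sketch of that bracketed-counter idea, with invented names:

	#include <stdio.h>

	/* stand-in for one of the kernel's fscache_n_cop_* atomics: incremented
	 * just before calling into the backend, decremented just after, so its
	 * instantaneous value is the number of calls currently executing */
	static int n_cop_drop_object;

	static void backend_drop_object(int id)
	{
		printf("dropping object %d (in flight: %d)\n", id, n_cop_drop_object);
	}

	static void drop_object(int id)
	{
		n_cop_drop_object++;		/* fscache_stat() */
		backend_drop_object(id);
		n_cop_drop_object--;		/* fscache_stat_d() */
	}

	int main(void)
	{
		drop_object(1);
		drop_object(2);
		printf("in flight now: %d\n", n_cop_drop_object);
		return 0;
	}
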
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e7f8d53b8b6b..313e79a14266 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -13,6 +13,7 @@
13 13
14#define FSCACHE_DEBUG_LEVEL OPERATION 14#define FSCACHE_DEBUG_LEVEL OPERATION
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/seq_file.h>
16#include "internal.h" 17#include "internal.h"
17 18
18atomic_t fscache_op_debug_id; 19atomic_t fscache_op_debug_id;
@@ -31,32 +32,33 @@ void fscache_enqueue_operation(struct fscache_operation *op)
31 _enter("{OBJ%x OP%x,%u}", 32 _enter("{OBJ%x OP%x,%u}",
32 op->object->debug_id, op->debug_id, atomic_read(&op->usage)); 33 op->object->debug_id, op->debug_id, atomic_read(&op->usage));
33 34
35 fscache_set_op_state(op, "EnQ");
36
37 ASSERT(list_empty(&op->pend_link));
34 ASSERT(op->processor != NULL); 38 ASSERT(op->processor != NULL);
35 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); 39 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
36 ASSERTCMP(atomic_read(&op->usage), >, 0); 40 ASSERTCMP(atomic_read(&op->usage), >, 0);
37 41
38 if (list_empty(&op->pend_link)) { 42 fscache_stat(&fscache_n_op_enqueue);
39 switch (op->flags & FSCACHE_OP_TYPE) { 43 switch (op->flags & FSCACHE_OP_TYPE) {
40 case FSCACHE_OP_FAST: 44 case FSCACHE_OP_FAST:
41 _debug("queue fast"); 45 _debug("queue fast");
42 atomic_inc(&op->usage); 46 atomic_inc(&op->usage);
43 if (!schedule_work(&op->fast_work)) 47 if (!schedule_work(&op->fast_work))
44 fscache_put_operation(op); 48 fscache_put_operation(op);
45 break; 49 break;
46 case FSCACHE_OP_SLOW: 50 case FSCACHE_OP_SLOW:
47 _debug("queue slow"); 51 _debug("queue slow");
48 slow_work_enqueue(&op->slow_work); 52 slow_work_enqueue(&op->slow_work);
49 break; 53 break;
50 case FSCACHE_OP_MYTHREAD: 54 case FSCACHE_OP_MYTHREAD:
51 _debug("queue for caller's attention"); 55 _debug("queue for caller's attention");
52 break; 56 break;
53 default: 57 default:
54 printk(KERN_ERR "FS-Cache: Unexpected op type %lx", 58 printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
55 op->flags); 59 op->flags);
56 BUG(); 60 BUG();
57 break; 61 break;
58 }
59 fscache_stat(&fscache_n_op_enqueue);
60 } 62 }
61} 63}
62EXPORT_SYMBOL(fscache_enqueue_operation); 64EXPORT_SYMBOL(fscache_enqueue_operation);
@@ -67,6 +69,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
67static void fscache_run_op(struct fscache_object *object, 69static void fscache_run_op(struct fscache_object *object,
68 struct fscache_operation *op) 70 struct fscache_operation *op)
69{ 71{
72 fscache_set_op_state(op, "Run");
73
70 object->n_in_progress++; 74 object->n_in_progress++;
71 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) 75 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
72 wake_up_bit(&op->flags, FSCACHE_OP_WAITING); 76 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -87,9 +91,12 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
87 91
88 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); 92 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
89 93
94 fscache_set_op_state(op, "SubmitX");
95
90 spin_lock(&object->lock); 96 spin_lock(&object->lock);
91 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 97 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
92 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 98 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
99 ASSERT(list_empty(&op->pend_link));
93 100
94 ret = -ENOBUFS; 101 ret = -ENOBUFS;
95 if (fscache_object_is_active(object)) { 102 if (fscache_object_is_active(object)) {
@@ -190,9 +197,12 @@ int fscache_submit_op(struct fscache_object *object,
190 197
191 ASSERTCMP(atomic_read(&op->usage), >, 0); 198 ASSERTCMP(atomic_read(&op->usage), >, 0);
192 199
200 fscache_set_op_state(op, "Submit");
201
193 spin_lock(&object->lock); 202 spin_lock(&object->lock);
194 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 203 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
195 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 204 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
205 ASSERT(list_empty(&op->pend_link));
196 206
197 ostate = object->state; 207 ostate = object->state;
198 smp_rmb(); 208 smp_rmb();
@@ -222,6 +232,11 @@ int fscache_submit_op(struct fscache_object *object,
222 list_add_tail(&op->pend_link, &object->pending_ops); 232 list_add_tail(&op->pend_link, &object->pending_ops);
223 fscache_stat(&fscache_n_op_pend); 233 fscache_stat(&fscache_n_op_pend);
224 ret = 0; 234 ret = 0;
235 } else if (object->state == FSCACHE_OBJECT_DYING ||
236 object->state == FSCACHE_OBJECT_LC_DYING ||
237 object->state == FSCACHE_OBJECT_WITHDRAWING) {
238 fscache_stat(&fscache_n_op_rejected);
239 ret = -ENOBUFS;
225 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { 240 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
226 fscache_report_unexpected_submission(object, op, ostate); 241 fscache_report_unexpected_submission(object, op, ostate);
227 ASSERT(!fscache_object_is_active(object)); 242 ASSERT(!fscache_object_is_active(object));
@@ -264,12 +279,7 @@ void fscache_start_operations(struct fscache_object *object)
264 stop = true; 279 stop = true;
265 } 280 }
266 list_del_init(&op->pend_link); 281 list_del_init(&op->pend_link);
267 object->n_in_progress++; 282 fscache_run_op(object, op);
268
269 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
270 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
271 if (op->processor)
272 fscache_enqueue_operation(op);
273 283
274 /* the pending queue was holding a ref on the object */ 284 /* the pending queue was holding a ref on the object */
275 fscache_put_operation(op); 285 fscache_put_operation(op);
@@ -282,6 +292,36 @@ void fscache_start_operations(struct fscache_object *object)
282} 292}
283 293
284/* 294/*
295 * cancel an operation that's pending on an object
296 */
297int fscache_cancel_op(struct fscache_operation *op)
298{
299 struct fscache_object *object = op->object;
300 int ret;
301
302 _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
303
304 spin_lock(&object->lock);
305
306 ret = -EBUSY;
307 if (!list_empty(&op->pend_link)) {
308 fscache_stat(&fscache_n_op_cancelled);
309 list_del_init(&op->pend_link);
310 object->n_ops--;
311 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
312 object->n_exclusive--;
313 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
314 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
315 fscache_put_operation(op);
316 ret = 0;
317 }
318
319 spin_unlock(&object->lock);
320 _leave(" = %d", ret);
321 return ret;
322}
323
324/*
285 * release an operation 325 * release an operation
286 * - queues pending ops if this is the last in-progress op 326 * - queues pending ops if this is the last in-progress op
287 */ 327 */
@@ -298,6 +338,8 @@ void fscache_put_operation(struct fscache_operation *op)
298 if (!atomic_dec_and_test(&op->usage)) 338 if (!atomic_dec_and_test(&op->usage))
299 return; 339 return;
300 340
341 fscache_set_op_state(op, "Put");
342
301 _debug("PUT OP"); 343 _debug("PUT OP");
302 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) 344 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
303 BUG(); 345 BUG();
@@ -311,6 +353,9 @@ void fscache_put_operation(struct fscache_operation *op)
311 353
312 object = op->object; 354 object = op->object;
313 355
356 if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
357 atomic_dec(&object->n_reads);
358
314 /* now... we may get called with the object spinlock held, so we 359 /* now... we may get called with the object spinlock held, so we
315 * complete the cleanup here only if we can immediately acquire the 360 * complete the cleanup here only if we can immediately acquire the
316 * lock, and defer it otherwise */ 361 * lock, and defer it otherwise */
@@ -452,8 +497,27 @@ static void fscache_op_execute(struct slow_work *work)
452 _leave(""); 497 _leave("");
453} 498}
454 499
500/*
501 * describe an operation for slow-work debugging
502 */
503#ifdef CONFIG_SLOW_WORK_PROC
504static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
505{
506 struct fscache_operation *op =
507 container_of(work, struct fscache_operation, slow_work);
508
509 seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
510 op->object->debug_id, op->debug_id,
511 op->name, op->state, op->flags);
512}
513#endif
514
455const struct slow_work_ops fscache_op_slow_work_ops = { 515const struct slow_work_ops fscache_op_slow_work_ops = {
516 .owner = THIS_MODULE,
456 .get_ref = fscache_op_get_ref, 517 .get_ref = fscache_op_get_ref,
457 .put_ref = fscache_op_put_ref, 518 .put_ref = fscache_op_put_ref,
458 .execute = fscache_op_execute, 519 .execute = fscache_op_execute,
520#ifdef CONFIG_SLOW_WORK_PROC
521 .desc = fscache_op_desc,
522#endif
459}; 523};
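
The new fscache_cancel_op() above lets a waiter that has been interrupted pull its operation back off the object's pending queue (it is used later in this series by fscache_wait_for_retrieval_activation() in page.c): if the op is still on pend_link it is unlinked and the object's op counts are corrected, otherwise -EBUSY is returned because the op has already been started and the caller must wait for it. A stripped-down model of that rule, not the kernel structures:

	#include <stdio.h>
	#include <errno.h>

	/* illustrative model: an operation can be cancelled while it is still
	 * queued, but not once it has been dequeued and started running */
	enum op_state { OP_PENDING, OP_RUNNING, OP_COMPLETE, OP_CANCELLED };

	struct op {
		enum op_state state;
	};

	static int cancel_op(struct op *op)
	{
		if (op->state != OP_PENDING)
			return -EBUSY;		/* too late: already dequeued */
		op->state = OP_CANCELLED;	/* drop it from the pending queue */
		return 0;
	}

	int main(void)
	{
		struct op a = { OP_PENDING }, b = { OP_RUNNING };

		printf("cancel pending: %d\n", cancel_op(&a));	/* 0 */
		printf("cancel running: %d\n", cancel_op(&b));	/* -EBUSY */
		return 0;
	}
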
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 2568e0eb644f..c598ea4c4e7d 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -43,18 +43,102 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
43EXPORT_SYMBOL(__fscache_wait_on_page_write); 43EXPORT_SYMBOL(__fscache_wait_on_page_write);
44 44
45/* 45/*
46 * note that a page has finished being written to the cache 46 * decide whether a page can be released, possibly by cancelling a store to it
47 * - we're allowed to sleep if __GFP_WAIT is flagged
47 */ 48 */
48static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page) 49bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50 struct page *page,
51 gfp_t gfp)
49{ 52{
50 struct page *xpage; 53 struct page *xpage;
54 void *val;
55
56 _enter("%p,%p,%x", cookie, page, gfp);
57
58 rcu_read_lock();
59 val = radix_tree_lookup(&cookie->stores, page->index);
60 if (!val) {
61 rcu_read_unlock();
62 fscache_stat(&fscache_n_store_vmscan_not_storing);
63 __fscache_uncache_page(cookie, page);
64 return true;
65 }
66
67 /* see if the page is actually undergoing storage - if so we can't get
68 * rid of it till the cache has finished with it */
69 if (radix_tree_tag_get(&cookie->stores, page->index,
70 FSCACHE_COOKIE_STORING_TAG)) {
71 rcu_read_unlock();
72 goto page_busy;
73 }
74
75 /* the page is pending storage, so we attempt to cancel the store and
76 * discard the store request so that the page can be reclaimed */
77 spin_lock(&cookie->stores_lock);
78 rcu_read_unlock();
79
80 if (radix_tree_tag_get(&cookie->stores, page->index,
81 FSCACHE_COOKIE_STORING_TAG)) {
82 /* the page started to undergo storage whilst we were looking,
83 * so now we can only wait or return */
84 spin_unlock(&cookie->stores_lock);
85 goto page_busy;
86 }
51 87
52 spin_lock(&cookie->lock);
53 xpage = radix_tree_delete(&cookie->stores, page->index); 88 xpage = radix_tree_delete(&cookie->stores, page->index);
54 spin_unlock(&cookie->lock); 89 spin_unlock(&cookie->stores_lock);
55 ASSERT(xpage != NULL); 90
91 if (xpage) {
92 fscache_stat(&fscache_n_store_vmscan_cancelled);
93 fscache_stat(&fscache_n_store_radix_deletes);
94 ASSERTCMP(xpage, ==, page);
95 } else {
96 fscache_stat(&fscache_n_store_vmscan_gone);
97 }
56 98
57 wake_up_bit(&cookie->flags, 0); 99 wake_up_bit(&cookie->flags, 0);
100 if (xpage)
101 page_cache_release(xpage);
102 __fscache_uncache_page(cookie, page);
103 return true;
104
105page_busy:
106 /* we might want to wait here, but that could deadlock the allocator as
107 * the slow-work threads writing to the cache may all end up sleeping
108 * on memory allocation */
109 fscache_stat(&fscache_n_store_vmscan_busy);
110 return false;
111}
112EXPORT_SYMBOL(__fscache_maybe_release_page);
113
114/*
115 * note that a page has finished being written to the cache
116 */
117static void fscache_end_page_write(struct fscache_object *object,
118 struct page *page)
119{
120 struct fscache_cookie *cookie;
121 struct page *xpage = NULL;
122
123 spin_lock(&object->lock);
124 cookie = object->cookie;
125 if (cookie) {
126 /* delete the page from the tree if it is now no longer
127 * pending */
128 spin_lock(&cookie->stores_lock);
129 radix_tree_tag_clear(&cookie->stores, page->index,
130 FSCACHE_COOKIE_STORING_TAG);
131 if (!radix_tree_tag_get(&cookie->stores, page->index,
132 FSCACHE_COOKIE_PENDING_TAG)) {
133 fscache_stat(&fscache_n_store_radix_deletes);
134 xpage = radix_tree_delete(&cookie->stores, page->index);
135 }
136 spin_unlock(&cookie->stores_lock);
137 wake_up_bit(&cookie->flags, 0);
138 }
139 spin_unlock(&object->lock);
140 if (xpage)
141 page_cache_release(xpage);
58} 142}
59 143
60/* 144/*
@@ -63,14 +147,21 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p
63static void fscache_attr_changed_op(struct fscache_operation *op) 147static void fscache_attr_changed_op(struct fscache_operation *op)
64{ 148{
65 struct fscache_object *object = op->object; 149 struct fscache_object *object = op->object;
150 int ret;
66 151
67 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); 152 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
68 153
69 fscache_stat(&fscache_n_attr_changed_calls); 154 fscache_stat(&fscache_n_attr_changed_calls);
70 155
71 if (fscache_object_is_active(object) && 156 if (fscache_object_is_active(object)) {
72 object->cache->ops->attr_changed(object) < 0) 157 fscache_set_op_state(op, "CallFS");
73 fscache_abort_object(object); 158 fscache_stat(&fscache_n_cop_attr_changed);
159 ret = object->cache->ops->attr_changed(object);
160 fscache_stat_d(&fscache_n_cop_attr_changed);
161 fscache_set_op_state(op, "Done");
162 if (ret < 0)
163 fscache_abort_object(object);
164 }
74 165
75 _leave(""); 166 _leave("");
76} 167}
@@ -99,6 +190,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
99 fscache_operation_init(op, NULL); 190 fscache_operation_init(op, NULL);
100 fscache_operation_init_slow(op, fscache_attr_changed_op); 191 fscache_operation_init_slow(op, fscache_attr_changed_op);
101 op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE); 192 op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
193 fscache_set_op_name(op, "Attr");
102 194
103 spin_lock(&cookie->lock); 195 spin_lock(&cookie->lock);
104 196
@@ -184,6 +276,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
184 op->start_time = jiffies; 276 op->start_time = jiffies;
185 INIT_WORK(&op->op.fast_work, fscache_retrieval_work); 277 INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
186 INIT_LIST_HEAD(&op->to_do); 278 INIT_LIST_HEAD(&op->to_do);
279 fscache_set_op_name(&op->op, "Retr");
187 return op; 280 return op;
188} 281}
189 282
@@ -221,6 +314,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
221} 314}
222 315
223/* 316/*
317 * wait for an object to become active (or dead)
318 */
319static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
320 struct fscache_retrieval *op,
321 atomic_t *stat_op_waits,
322 atomic_t *stat_object_dead)
323{
324 int ret;
325
326 if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
327 goto check_if_dead;
328
329 _debug(">>> WT");
330 fscache_stat(stat_op_waits);
331 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
332 fscache_wait_bit_interruptible,
333 TASK_INTERRUPTIBLE) < 0) {
334 ret = fscache_cancel_op(&op->op);
335 if (ret == 0)
336 return -ERESTARTSYS;
337
338 /* it's been removed from the pending queue by another party,
339 * so we should get to run shortly */
340 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
341 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
342 }
343 _debug("<<< GO");
344
345check_if_dead:
346 if (unlikely(fscache_object_is_dead(object))) {
347 fscache_stat(stat_object_dead);
348 return -ENOBUFS;
349 }
350 return 0;
351}
352
353/*
224 * read a page from the cache or allocate a block in which to store it 354 * read a page from the cache or allocate a block in which to store it
225 * - we return: 355 * - we return:
226 * -ENOMEM - out of memory, nothing done 356 * -ENOMEM - out of memory, nothing done
@@ -257,6 +387,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
257 _leave(" = -ENOMEM"); 387 _leave(" = -ENOMEM");
258 return -ENOMEM; 388 return -ENOMEM;
259 } 389 }
390 fscache_set_op_name(&op->op, "RetrRA1");
260 391
261 spin_lock(&cookie->lock); 392 spin_lock(&cookie->lock);
262 393
@@ -267,6 +398,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
267 398
268 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); 399 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
269 400
401 atomic_inc(&object->n_reads);
402 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
403
270 if (fscache_submit_op(object, &op->op) < 0) 404 if (fscache_submit_op(object, &op->op) < 0)
271 goto nobufs_unlock; 405 goto nobufs_unlock;
272 spin_unlock(&cookie->lock); 406 spin_unlock(&cookie->lock);
@@ -279,23 +413,27 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
279 413
280 /* we wait for the operation to become active, and then process it 414 /* we wait for the operation to become active, and then process it
281 * *here*, in this thread, and not in the thread pool */ 415 * *here*, in this thread, and not in the thread pool */
282 if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { 416 ret = fscache_wait_for_retrieval_activation(
283 _debug(">>> WT"); 417 object, op,
284 fscache_stat(&fscache_n_retrieval_op_waits); 418 __fscache_stat(&fscache_n_retrieval_op_waits),
285 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, 419 __fscache_stat(&fscache_n_retrievals_object_dead));
286 fscache_wait_bit, TASK_UNINTERRUPTIBLE); 420 if (ret < 0)
287 _debug("<<< GO"); 421 goto error;
288 }
289 422
290 /* ask the cache to honour the operation */ 423 /* ask the cache to honour the operation */
291 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { 424 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
425 fscache_stat(&fscache_n_cop_allocate_page);
292 ret = object->cache->ops->allocate_page(op, page, gfp); 426 ret = object->cache->ops->allocate_page(op, page, gfp);
427 fscache_stat_d(&fscache_n_cop_allocate_page);
293 if (ret == 0) 428 if (ret == 0)
294 ret = -ENODATA; 429 ret = -ENODATA;
295 } else { 430 } else {
431 fscache_stat(&fscache_n_cop_read_or_alloc_page);
296 ret = object->cache->ops->read_or_alloc_page(op, page, gfp); 432 ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
433 fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
297 } 434 }
298 435
436error:
299 if (ret == -ENOMEM) 437 if (ret == -ENOMEM)
300 fscache_stat(&fscache_n_retrievals_nomem); 438 fscache_stat(&fscache_n_retrievals_nomem);
301 else if (ret == -ERESTARTSYS) 439 else if (ret == -ERESTARTSYS)
@@ -347,7 +485,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
347 void *context, 485 void *context,
348 gfp_t gfp) 486 gfp_t gfp)
349{ 487{
350 fscache_pages_retrieval_func_t func;
351 struct fscache_retrieval *op; 488 struct fscache_retrieval *op;
352 struct fscache_object *object; 489 struct fscache_object *object;
353 int ret; 490 int ret;
@@ -369,6 +506,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
369 op = fscache_alloc_retrieval(mapping, end_io_func, context); 506 op = fscache_alloc_retrieval(mapping, end_io_func, context);
370 if (!op) 507 if (!op)
371 return -ENOMEM; 508 return -ENOMEM;
509 fscache_set_op_name(&op->op, "RetrRAN");
372 510
373 spin_lock(&cookie->lock); 511 spin_lock(&cookie->lock);
374 512
@@ -377,6 +515,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
377 object = hlist_entry(cookie->backing_objects.first, 515 object = hlist_entry(cookie->backing_objects.first,
378 struct fscache_object, cookie_link); 516 struct fscache_object, cookie_link);
379 517
518 atomic_inc(&object->n_reads);
519 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
520
380 if (fscache_submit_op(object, &op->op) < 0) 521 if (fscache_submit_op(object, &op->op) < 0)
381 goto nobufs_unlock; 522 goto nobufs_unlock;
382 spin_unlock(&cookie->lock); 523 spin_unlock(&cookie->lock);
@@ -389,21 +530,27 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
389 530
390 /* we wait for the operation to become active, and then process it 531 /* we wait for the operation to become active, and then process it
391 * *here*, in this thread, and not in the thread pool */ 532 * *here*, in this thread, and not in the thread pool */
392 if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { 533 ret = fscache_wait_for_retrieval_activation(
393 _debug(">>> WT"); 534 object, op,
394 fscache_stat(&fscache_n_retrieval_op_waits); 535 __fscache_stat(&fscache_n_retrieval_op_waits),
395 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, 536 __fscache_stat(&fscache_n_retrievals_object_dead));
396 fscache_wait_bit, TASK_UNINTERRUPTIBLE); 537 if (ret < 0)
397 _debug("<<< GO"); 538 goto error;
398 }
399 539
400 /* ask the cache to honour the operation */ 540 /* ask the cache to honour the operation */
401 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) 541 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
402 func = object->cache->ops->allocate_pages; 542 fscache_stat(&fscache_n_cop_allocate_pages);
403 else 543 ret = object->cache->ops->allocate_pages(
404 func = object->cache->ops->read_or_alloc_pages; 544 op, pages, nr_pages, gfp);
405 ret = func(op, pages, nr_pages, gfp); 545 fscache_stat_d(&fscache_n_cop_allocate_pages);
546 } else {
547 fscache_stat(&fscache_n_cop_read_or_alloc_pages);
548 ret = object->cache->ops->read_or_alloc_pages(
549 op, pages, nr_pages, gfp);
550 fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
551 }
406 552
553error:
407 if (ret == -ENOMEM) 554 if (ret == -ENOMEM)
408 fscache_stat(&fscache_n_retrievals_nomem); 555 fscache_stat(&fscache_n_retrievals_nomem);
409 else if (ret == -ERESTARTSYS) 556 else if (ret == -ERESTARTSYS)
@@ -461,6 +608,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
461 op = fscache_alloc_retrieval(page->mapping, NULL, NULL); 608 op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
462 if (!op) 609 if (!op)
463 return -ENOMEM; 610 return -ENOMEM;
611 fscache_set_op_name(&op->op, "RetrAL1");
464 612
465 spin_lock(&cookie->lock); 613 spin_lock(&cookie->lock);
466 614
@@ -475,18 +623,22 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
475 623
476 fscache_stat(&fscache_n_alloc_ops); 624 fscache_stat(&fscache_n_alloc_ops);
477 625
478 if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { 626 ret = fscache_wait_for_retrieval_activation(
479 _debug(">>> WT"); 627 object, op,
480 fscache_stat(&fscache_n_alloc_op_waits); 628 __fscache_stat(&fscache_n_alloc_op_waits),
481 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, 629 __fscache_stat(&fscache_n_allocs_object_dead));
482 fscache_wait_bit, TASK_UNINTERRUPTIBLE); 630 if (ret < 0)
483 _debug("<<< GO"); 631 goto error;
484 }
485 632
486 /* ask the cache to honour the operation */ 633 /* ask the cache to honour the operation */
634 fscache_stat(&fscache_n_cop_allocate_page);
487 ret = object->cache->ops->allocate_page(op, page, gfp); 635 ret = object->cache->ops->allocate_page(op, page, gfp);
636 fscache_stat_d(&fscache_n_cop_allocate_page);
488 637
489 if (ret < 0) 638error:
639 if (ret == -ERESTARTSYS)
640 fscache_stat(&fscache_n_allocs_intr);
641 else if (ret < 0)
490 fscache_stat(&fscache_n_allocs_nobufs); 642 fscache_stat(&fscache_n_allocs_nobufs);
491 else 643 else
492 fscache_stat(&fscache_n_allocs_ok); 644 fscache_stat(&fscache_n_allocs_ok);
@@ -521,7 +673,7 @@ static void fscache_write_op(struct fscache_operation *_op)
521 struct fscache_storage *op = 673 struct fscache_storage *op =
522 container_of(_op, struct fscache_storage, op); 674 container_of(_op, struct fscache_storage, op);
523 struct fscache_object *object = op->op.object; 675 struct fscache_object *object = op->op.object;
524 struct fscache_cookie *cookie = object->cookie; 676 struct fscache_cookie *cookie;
525 struct page *page; 677 struct page *page;
526 unsigned n; 678 unsigned n;
527 void *results[1]; 679 void *results[1];
@@ -529,16 +681,19 @@ static void fscache_write_op(struct fscache_operation *_op)
529 681
530 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); 682 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
531 683
532 spin_lock(&cookie->lock); 684 fscache_set_op_state(&op->op, "GetPage");
685
533 spin_lock(&object->lock); 686 spin_lock(&object->lock);
687 cookie = object->cookie;
534 688
535 if (!fscache_object_is_active(object)) { 689 if (!fscache_object_is_active(object) || !cookie) {
536 spin_unlock(&object->lock); 690 spin_unlock(&object->lock);
537 spin_unlock(&cookie->lock);
538 _leave(""); 691 _leave("");
539 return; 692 return;
540 } 693 }
541 694
695 spin_lock(&cookie->stores_lock);
696
542 fscache_stat(&fscache_n_store_calls); 697 fscache_stat(&fscache_n_store_calls);
543 698
544 /* find a page to store */ 699 /* find a page to store */
@@ -549,23 +704,35 @@ static void fscache_write_op(struct fscache_operation *_op)
549 goto superseded; 704 goto superseded;
550 page = results[0]; 705 page = results[0];
551 _debug("gang %d [%lx]", n, page->index); 706 _debug("gang %d [%lx]", n, page->index);
552 if (page->index > op->store_limit) 707 if (page->index > op->store_limit) {
708 fscache_stat(&fscache_n_store_pages_over_limit);
553 goto superseded; 709 goto superseded;
710 }
554 711
555 radix_tree_tag_clear(&cookie->stores, page->index, 712 if (page) {
556 FSCACHE_COOKIE_PENDING_TAG); 713 radix_tree_tag_set(&cookie->stores, page->index,
714 FSCACHE_COOKIE_STORING_TAG);
715 radix_tree_tag_clear(&cookie->stores, page->index,
716 FSCACHE_COOKIE_PENDING_TAG);
717 }
557 718
719 spin_unlock(&cookie->stores_lock);
558 spin_unlock(&object->lock); 720 spin_unlock(&object->lock);
559 spin_unlock(&cookie->lock);
560 721
561 if (page) { 722 if (page) {
723 fscache_set_op_state(&op->op, "Store");
724 fscache_stat(&fscache_n_store_pages);
725 fscache_stat(&fscache_n_cop_write_page);
562 ret = object->cache->ops->write_page(op, page); 726 ret = object->cache->ops->write_page(op, page);
563 fscache_end_page_write(cookie, page); 727 fscache_stat_d(&fscache_n_cop_write_page);
564 page_cache_release(page); 728 fscache_set_op_state(&op->op, "EndWrite");
565 if (ret < 0) 729 fscache_end_page_write(object, page);
730 if (ret < 0) {
731 fscache_set_op_state(&op->op, "Abort");
566 fscache_abort_object(object); 732 fscache_abort_object(object);
567 else 733 } else {
568 fscache_enqueue_operation(&op->op); 734 fscache_enqueue_operation(&op->op);
735 }
569 } 736 }
570 737
571 _leave(""); 738 _leave("");
@@ -575,9 +742,9 @@ superseded:
575 /* this writer is going away and there aren't any more things to 742 /* this writer is going away and there aren't any more things to
576 * write */ 743 * write */
577 _debug("cease"); 744 _debug("cease");
745 spin_unlock(&cookie->stores_lock);
578 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); 746 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
579 spin_unlock(&object->lock); 747 spin_unlock(&object->lock);
580 spin_unlock(&cookie->lock);
581 _leave(""); 748 _leave("");
582} 749}
583 750
@@ -634,6 +801,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
634 fscache_operation_init(&op->op, fscache_release_write_op); 801 fscache_operation_init(&op->op, fscache_release_write_op);
635 fscache_operation_init_slow(&op->op, fscache_write_op); 802 fscache_operation_init_slow(&op->op, fscache_write_op);
636 op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING); 803 op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
804 fscache_set_op_name(&op->op, "Write1");
637 805
638 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); 806 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
639 if (ret < 0) 807 if (ret < 0)
@@ -652,6 +820,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
652 /* add the page to the pending-storage radix tree on the backing 820 /* add the page to the pending-storage radix tree on the backing
653 * object */ 821 * object */
654 spin_lock(&object->lock); 822 spin_lock(&object->lock);
823 spin_lock(&cookie->stores_lock);
655 824
656 _debug("store limit %llx", (unsigned long long) object->store_limit); 825 _debug("store limit %llx", (unsigned long long) object->store_limit);
657 826
@@ -672,6 +841,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
672 if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags)) 841 if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
673 goto already_pending; 842 goto already_pending;
674 843
844 spin_unlock(&cookie->stores_lock);
675 spin_unlock(&object->lock); 845 spin_unlock(&object->lock);
676 846
677 op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); 847 op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
@@ -693,6 +863,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
693already_queued: 863already_queued:
694 fscache_stat(&fscache_n_stores_again); 864 fscache_stat(&fscache_n_stores_again);
695already_pending: 865already_pending:
866 spin_unlock(&cookie->stores_lock);
696 spin_unlock(&object->lock); 867 spin_unlock(&object->lock);
697 spin_unlock(&cookie->lock); 868 spin_unlock(&cookie->lock);
698 radix_tree_preload_end(); 869 radix_tree_preload_end();
@@ -702,7 +873,9 @@ already_pending:
702 return 0; 873 return 0;
703 874
704submit_failed: 875submit_failed:
876 spin_lock(&cookie->stores_lock);
705 radix_tree_delete(&cookie->stores, page->index); 877 radix_tree_delete(&cookie->stores, page->index);
878 spin_unlock(&cookie->stores_lock);
706 page_cache_release(page); 879 page_cache_release(page);
707 ret = -ENOBUFS; 880 ret = -ENOBUFS;
708 goto nobufs; 881 goto nobufs;
@@ -763,7 +936,9 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
763 if (TestClearPageFsCache(page) && 936 if (TestClearPageFsCache(page) &&
764 object->cache->ops->uncache_page) { 937 object->cache->ops->uncache_page) {
765 /* the cache backend releases the cookie lock */ 938 /* the cache backend releases the cookie lock */
939 fscache_stat(&fscache_n_cop_uncache_page);
766 object->cache->ops->uncache_page(object, page); 940 object->cache->ops->uncache_page(object, page);
941 fscache_stat_d(&fscache_n_cop_uncache_page);
767 goto done; 942 goto done;
768 } 943 }
769 944
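
The page.c changes above split store tracking into two radix-tree tags: PENDING (queued for write-out to the cache) and the new STORING (write actually in progress). __fscache_maybe_release_page() uses them so vmscan can reclaim netfs pages without sleeping: an untracked page is released immediately, a page that is only pending has its queued store cancelled and is released, and a page currently being stored is reported busy rather than waited on, to avoid deadlocking the allocator. A toy decision table for the same rule (plain C booleans, not the kernel data structures):

	#include <stdio.h>
	#include <stdbool.h>

	struct tracked_page {
		bool in_store_tree;	/* radix_tree_lookup() found it */
		bool storing;		/* FSCACHE_COOKIE_STORING_TAG set */
	};

	static bool maybe_release_page(struct tracked_page *p)
	{
		if (!p->in_store_tree)
			return true;		/* nothing pending: release */
		if (p->storing)
			return false;		/* write in progress: keep the page */

		/* pending but not yet storing: cancel the queued store */
		p->in_store_tree = false;
		return true;
	}

	int main(void)
	{
		struct tracked_page idle = { false, false };
		struct tracked_page queued = { true, false };
		struct tracked_page busy = { true, true };

		printf("idle:   %d\n", maybe_release_page(&idle));	/* 1 */
		printf("queued: %d\n", maybe_release_page(&queued));	/* 1 */
		printf("busy:   %d\n", maybe_release_page(&busy));	/* 0 */
		return 0;
	}
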
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index beeab44bc31a..1d9e4951a597 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -37,10 +37,20 @@ int __init fscache_proc_init(void)
37 goto error_histogram; 37 goto error_histogram;
38#endif 38#endif
39 39
40#ifdef CONFIG_FSCACHE_OBJECT_LIST
41 if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
42 &fscache_objlist_fops))
43 goto error_objects;
44#endif
45
40 _leave(" = 0"); 46 _leave(" = 0");
41 return 0; 47 return 0;
42 48
49#ifdef CONFIG_FSCACHE_OBJECT_LIST
50error_objects:
51#endif
43#ifdef CONFIG_FSCACHE_HISTOGRAM 52#ifdef CONFIG_FSCACHE_HISTOGRAM
53 remove_proc_entry("fs/fscache/histogram", NULL);
44error_histogram: 54error_histogram:
45#endif 55#endif
46#ifdef CONFIG_FSCACHE_STATS 56#ifdef CONFIG_FSCACHE_STATS
@@ -58,6 +68,9 @@ error_dir:
58 */ 68 */
59void fscache_proc_cleanup(void) 69void fscache_proc_cleanup(void)
60{ 70{
71#ifdef CONFIG_FSCACHE_OBJECT_LIST
72 remove_proc_entry("fs/fscache/objects", NULL);
73#endif
61#ifdef CONFIG_FSCACHE_HISTOGRAM 74#ifdef CONFIG_FSCACHE_HISTOGRAM
62 remove_proc_entry("fs/fscache/histogram", NULL); 75 remove_proc_entry("fs/fscache/histogram", NULL);
63#endif 76#endif
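
The proc.c hunk wires /proc/fs/fscache/objects into the existing goto-based unwind chain: entries are created in order and, when a later one fails, the earlier ones are removed in reverse (note how the new error_objects label falls through into removing the histogram entry). A stand-alone sketch of that register-in-order, unwind-in-reverse pattern, with invented entry names and a simulated failure:

	#include <stdio.h>
	#include <stdbool.h>

	static bool make_entry(const char *name, bool fail)
	{
		if (fail) {
			printf("failed to create %s\n", name);
			return false;
		}
		printf("created %s\n", name);
		return true;
	}

	static void remove_entry(const char *name)
	{
		printf("removed %s\n", name);
	}

	static int proc_init(void)
	{
		if (!make_entry("stats", false))
			goto error_stats;
		if (!make_entry("histogram", false))
			goto error_histogram;
		if (!make_entry("objects", true))	/* simulate a failure */
			goto error_objects;
		return 0;

	error_objects:
		remove_entry("histogram");
	error_histogram:
		remove_entry("stats");
	error_stats:
		return -1;
	}

	int main(void)
	{
		return proc_init() ? 1 : 0;
	}
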
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 65deb99e756b..46435f3aae68 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -25,6 +25,8 @@ atomic_t fscache_n_op_requeue;
25atomic_t fscache_n_op_deferred_release; 25atomic_t fscache_n_op_deferred_release;
26atomic_t fscache_n_op_release; 26atomic_t fscache_n_op_release;
27atomic_t fscache_n_op_gc; 27atomic_t fscache_n_op_gc;
28atomic_t fscache_n_op_cancelled;
29atomic_t fscache_n_op_rejected;
28 30
29atomic_t fscache_n_attr_changed; 31atomic_t fscache_n_attr_changed;
30atomic_t fscache_n_attr_changed_ok; 32atomic_t fscache_n_attr_changed_ok;
@@ -36,6 +38,8 @@ atomic_t fscache_n_allocs;
36atomic_t fscache_n_allocs_ok; 38atomic_t fscache_n_allocs_ok;
37atomic_t fscache_n_allocs_wait; 39atomic_t fscache_n_allocs_wait;
38atomic_t fscache_n_allocs_nobufs; 40atomic_t fscache_n_allocs_nobufs;
41atomic_t fscache_n_allocs_intr;
42atomic_t fscache_n_allocs_object_dead;
39atomic_t fscache_n_alloc_ops; 43atomic_t fscache_n_alloc_ops;
40atomic_t fscache_n_alloc_op_waits; 44atomic_t fscache_n_alloc_op_waits;
41 45
@@ -46,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata;
46atomic_t fscache_n_retrievals_nobufs; 50atomic_t fscache_n_retrievals_nobufs;
47atomic_t fscache_n_retrievals_intr; 51atomic_t fscache_n_retrievals_intr;
48atomic_t fscache_n_retrievals_nomem; 52atomic_t fscache_n_retrievals_nomem;
53atomic_t fscache_n_retrievals_object_dead;
49atomic_t fscache_n_retrieval_ops; 54atomic_t fscache_n_retrieval_ops;
50atomic_t fscache_n_retrieval_op_waits; 55atomic_t fscache_n_retrieval_op_waits;
51 56
@@ -56,6 +61,14 @@ atomic_t fscache_n_stores_nobufs;
56atomic_t fscache_n_stores_oom; 61atomic_t fscache_n_stores_oom;
57atomic_t fscache_n_store_ops; 62atomic_t fscache_n_store_ops;
58atomic_t fscache_n_store_calls; 63atomic_t fscache_n_store_calls;
64atomic_t fscache_n_store_pages;
65atomic_t fscache_n_store_radix_deletes;
66atomic_t fscache_n_store_pages_over_limit;
67
68atomic_t fscache_n_store_vmscan_not_storing;
69atomic_t fscache_n_store_vmscan_gone;
70atomic_t fscache_n_store_vmscan_busy;
71atomic_t fscache_n_store_vmscan_cancelled;
59 72
60atomic_t fscache_n_marks; 73atomic_t fscache_n_marks;
61atomic_t fscache_n_uncaches; 74atomic_t fscache_n_uncaches;
@@ -74,6 +87,7 @@ atomic_t fscache_n_updates_run;
74atomic_t fscache_n_relinquishes; 87atomic_t fscache_n_relinquishes;
75atomic_t fscache_n_relinquishes_null; 88atomic_t fscache_n_relinquishes_null;
76atomic_t fscache_n_relinquishes_waitcrt; 89atomic_t fscache_n_relinquishes_waitcrt;
90atomic_t fscache_n_relinquishes_retire;
77 91
78atomic_t fscache_n_cookie_index; 92atomic_t fscache_n_cookie_index;
79atomic_t fscache_n_cookie_data; 93atomic_t fscache_n_cookie_data;
@@ -84,6 +98,7 @@ atomic_t fscache_n_object_no_alloc;
84atomic_t fscache_n_object_lookups; 98atomic_t fscache_n_object_lookups;
85atomic_t fscache_n_object_lookups_negative; 99atomic_t fscache_n_object_lookups_negative;
86atomic_t fscache_n_object_lookups_positive; 100atomic_t fscache_n_object_lookups_positive;
101atomic_t fscache_n_object_lookups_timed_out;
87atomic_t fscache_n_object_created; 102atomic_t fscache_n_object_created;
88atomic_t fscache_n_object_avail; 103atomic_t fscache_n_object_avail;
89atomic_t fscache_n_object_dead; 104atomic_t fscache_n_object_dead;
@@ -93,6 +108,23 @@ atomic_t fscache_n_checkaux_okay;
93atomic_t fscache_n_checkaux_update; 108atomic_t fscache_n_checkaux_update;
94atomic_t fscache_n_checkaux_obsolete; 109atomic_t fscache_n_checkaux_obsolete;
95 110
111atomic_t fscache_n_cop_alloc_object;
112atomic_t fscache_n_cop_lookup_object;
113atomic_t fscache_n_cop_lookup_complete;
114atomic_t fscache_n_cop_grab_object;
115atomic_t fscache_n_cop_update_object;
116atomic_t fscache_n_cop_drop_object;
117atomic_t fscache_n_cop_put_object;
118atomic_t fscache_n_cop_sync_cache;
119atomic_t fscache_n_cop_attr_changed;
120atomic_t fscache_n_cop_read_or_alloc_page;
121atomic_t fscache_n_cop_read_or_alloc_pages;
122atomic_t fscache_n_cop_allocate_page;
123atomic_t fscache_n_cop_allocate_pages;
124atomic_t fscache_n_cop_write_page;
125atomic_t fscache_n_cop_uncache_page;
126atomic_t fscache_n_cop_dissociate_pages;
127
96/* 128/*
97 * display the general statistics 129 * display the general statistics
98 */ 130 */
@@ -129,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
129 atomic_read(&fscache_n_acquires_nobufs), 161 atomic_read(&fscache_n_acquires_nobufs),
130 atomic_read(&fscache_n_acquires_oom)); 162 atomic_read(&fscache_n_acquires_oom));
131 163
132 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n", 164 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
133 atomic_read(&fscache_n_object_lookups), 165 atomic_read(&fscache_n_object_lookups),
134 atomic_read(&fscache_n_object_lookups_negative), 166 atomic_read(&fscache_n_object_lookups_negative),
135 atomic_read(&fscache_n_object_lookups_positive), 167 atomic_read(&fscache_n_object_lookups_positive),
168 atomic_read(&fscache_n_object_lookups_timed_out),
136 atomic_read(&fscache_n_object_created)); 169 atomic_read(&fscache_n_object_created));
137 170
138 seq_printf(m, "Updates: n=%u nul=%u run=%u\n", 171 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
@@ -140,10 +173,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
140 atomic_read(&fscache_n_updates_null), 173 atomic_read(&fscache_n_updates_null),
141 atomic_read(&fscache_n_updates_run)); 174 atomic_read(&fscache_n_updates_run));
142 175
143 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n", 176 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
144 atomic_read(&fscache_n_relinquishes), 177 atomic_read(&fscache_n_relinquishes),
145 atomic_read(&fscache_n_relinquishes_null), 178 atomic_read(&fscache_n_relinquishes_null),
146 atomic_read(&fscache_n_relinquishes_waitcrt)); 179 atomic_read(&fscache_n_relinquishes_waitcrt),
180 atomic_read(&fscache_n_relinquishes_retire));
147 181
148 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", 182 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
149 atomic_read(&fscache_n_attr_changed), 183 atomic_read(&fscache_n_attr_changed),
@@ -152,14 +186,16 @@ static int fscache_stats_show(struct seq_file *m, void *v)
152 atomic_read(&fscache_n_attr_changed_nomem), 186 atomic_read(&fscache_n_attr_changed_nomem),
153 atomic_read(&fscache_n_attr_changed_calls)); 187 atomic_read(&fscache_n_attr_changed_calls));
154 188
155 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n", 189 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
156 atomic_read(&fscache_n_allocs), 190 atomic_read(&fscache_n_allocs),
157 atomic_read(&fscache_n_allocs_ok), 191 atomic_read(&fscache_n_allocs_ok),
158 atomic_read(&fscache_n_allocs_wait), 192 atomic_read(&fscache_n_allocs_wait),
159 atomic_read(&fscache_n_allocs_nobufs)); 193 atomic_read(&fscache_n_allocs_nobufs),
160 seq_printf(m, "Allocs : ops=%u owt=%u\n", 194 atomic_read(&fscache_n_allocs_intr));
195 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
161 atomic_read(&fscache_n_alloc_ops), 196 atomic_read(&fscache_n_alloc_ops),
162 atomic_read(&fscache_n_alloc_op_waits)); 197 atomic_read(&fscache_n_alloc_op_waits),
198 atomic_read(&fscache_n_allocs_object_dead));
163 199
164 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" 200 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
165 " int=%u oom=%u\n", 201 " int=%u oom=%u\n",
@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
170 atomic_read(&fscache_n_retrievals_nobufs), 206 atomic_read(&fscache_n_retrievals_nobufs),
171 atomic_read(&fscache_n_retrievals_intr), 207 atomic_read(&fscache_n_retrievals_intr),
172 atomic_read(&fscache_n_retrievals_nomem)); 208 atomic_read(&fscache_n_retrievals_nomem));
173 seq_printf(m, "Retrvls: ops=%u owt=%u\n", 209 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
174 atomic_read(&fscache_n_retrieval_ops), 210 atomic_read(&fscache_n_retrieval_ops),
175 atomic_read(&fscache_n_retrieval_op_waits)); 211 atomic_read(&fscache_n_retrieval_op_waits),
212 atomic_read(&fscache_n_retrievals_object_dead));
176 213
177 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", 214 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
178 atomic_read(&fscache_n_stores), 215 atomic_read(&fscache_n_stores),
@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v)
180 atomic_read(&fscache_n_stores_again), 217 atomic_read(&fscache_n_stores_again),
181 atomic_read(&fscache_n_stores_nobufs), 218 atomic_read(&fscache_n_stores_nobufs),
182 atomic_read(&fscache_n_stores_oom)); 219 atomic_read(&fscache_n_stores_oom));
183 seq_printf(m, "Stores : ops=%u run=%u\n", 220 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
184 atomic_read(&fscache_n_store_ops), 221 atomic_read(&fscache_n_store_ops),
185 atomic_read(&fscache_n_store_calls)); 222 atomic_read(&fscache_n_store_calls),
223 atomic_read(&fscache_n_store_pages),
224 atomic_read(&fscache_n_store_radix_deletes),
225 atomic_read(&fscache_n_store_pages_over_limit));
186 226
187 seq_printf(m, "Ops : pend=%u run=%u enq=%u\n", 227 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
228 atomic_read(&fscache_n_store_vmscan_not_storing),
229 atomic_read(&fscache_n_store_vmscan_gone),
230 atomic_read(&fscache_n_store_vmscan_busy),
231 atomic_read(&fscache_n_store_vmscan_cancelled));
232
233 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
188 atomic_read(&fscache_n_op_pend), 234 atomic_read(&fscache_n_op_pend),
189 atomic_read(&fscache_n_op_run), 235 atomic_read(&fscache_n_op_run),
190 atomic_read(&fscache_n_op_enqueue)); 236 atomic_read(&fscache_n_op_enqueue),
237 atomic_read(&fscache_n_op_cancelled),
238 atomic_read(&fscache_n_op_rejected));
191 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", 239 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
192 atomic_read(&fscache_n_op_deferred_release), 240 atomic_read(&fscache_n_op_deferred_release),
193 atomic_read(&fscache_n_op_release), 241 atomic_read(&fscache_n_op_release),
194 atomic_read(&fscache_n_op_gc)); 242 atomic_read(&fscache_n_op_gc));
243
244 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
245 atomic_read(&fscache_n_cop_alloc_object),
246 atomic_read(&fscache_n_cop_lookup_object),
247 atomic_read(&fscache_n_cop_lookup_complete),
248 atomic_read(&fscache_n_cop_grab_object));
249 seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
250 atomic_read(&fscache_n_cop_update_object),
251 atomic_read(&fscache_n_cop_drop_object),
252 atomic_read(&fscache_n_cop_put_object),
253 atomic_read(&fscache_n_cop_attr_changed),
254 atomic_read(&fscache_n_cop_sync_cache));
255 seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
256 atomic_read(&fscache_n_cop_read_or_alloc_page),
257 atomic_read(&fscache_n_cop_read_or_alloc_pages),
258 atomic_read(&fscache_n_cop_allocate_page),
259 atomic_read(&fscache_n_cop_allocate_pages),
260 atomic_read(&fscache_n_cop_write_page),
261 atomic_read(&fscache_n_cop_uncache_page),
262 atomic_read(&fscache_n_cop_dissociate_pages));
195 return 0; 263 return 0;
196} 264}
197 265
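
The stats.c hunks above follow one simple pattern: a dedicated atomic_t per cache-backend operation (the fscache_n_cop_* counters), bumped at the call site and dumped by fscache_stats_show() with atomic_read()/seq_printf(). A rough user-space analogue of that pattern, as an illustrative sketch only (invented names, C11 atomics standing in for the kernel's atomic_t):

/* Illustrative sketch only -- not kernel code. One atomic counter per
 * operation, bumped on each call and dumped as a single formatted line. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint n_cop_lookup_object;   /* analogous to fscache_n_cop_lookup_object */
static atomic_uint n_cop_write_page;      /* analogous to fscache_n_cop_write_page */

static void cache_lookup_object(void)
{
        atomic_fetch_add_explicit(&n_cop_lookup_object, 1, memory_order_relaxed);
        /* ... real work would happen here ... */
}

static void cache_write_page(void)
{
        atomic_fetch_add_explicit(&n_cop_write_page, 1, memory_order_relaxed);
        /* ... real work would happen here ... */
}

static void stats_show(FILE *m)
{
        /* Same shape as the seq_printf() calls in the hunk above. */
        fprintf(m, "CacheOp: luo=%u wrp=%u\n",
                atomic_load_explicit(&n_cop_lookup_object, memory_order_relaxed),
                atomic_load_explicit(&n_cop_write_page, memory_order_relaxed));
}

int main(void)
{
        cache_lookup_object();
        cache_write_page();
        cache_write_page();
        stats_show(stdout);     /* prints: CacheOp: luo=1 wrp=2 */
        return 0;
}

Relaxed ordering is enough in a sketch like this because the counters are independent event tallies, not synchronisation state.
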
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 992f6c9410bb..4787ae6c5c1c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -385,6 +385,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
385 if (fc->no_create) 385 if (fc->no_create)
386 return -ENOSYS; 386 return -ENOSYS;
387 387
388 if (flags & O_DIRECT)
389 return -EINVAL;
390
388 forget_req = fuse_get_req(fc); 391 forget_req = fuse_get_req(fc);
389 if (IS_ERR(forget_req)) 392 if (IS_ERR(forget_req))
390 return PTR_ERR(forget_req); 393 return PTR_ERR(forget_req);
@@ -712,8 +715,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
712 fuse_invalidate_attr(newdir); 715 fuse_invalidate_attr(newdir);
713 716
714 /* newent will end up negative */ 717 /* newent will end up negative */
715 if (newent->d_inode) 718 if (newent->d_inode) {
719 fuse_invalidate_attr(newent->d_inode);
716 fuse_invalidate_entry_cache(newent); 720 fuse_invalidate_entry_cache(newent);
721 }
717 } else if (err == -EINTR) { 722 } else if (err == -EINTR) {
718 /* If request was interrupted, DEITY only knows if the 723 /* If request was interrupted, DEITY only knows if the
719 rename actually took place. If the invalidation 724 rename actually took place. If the invalidation
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a3492f7d207c..c18913a777ae 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1063,7 +1063,8 @@ ssize_t fuse_direct_io(struct file *file, const char __user *buf,
1063 break; 1063 break;
1064 } 1064 }
1065 } 1065 }
1066 fuse_put_request(fc, req); 1066 if (!IS_ERR(req))
1067 fuse_put_request(fc, req);
1067 if (res > 0) 1068 if (res > 0)
1068 *ppos = pos; 1069 *ppos = pos;
1069 1070
@@ -1599,7 +1600,7 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
1599 kaddr += copy; 1600 kaddr += copy;
1600 } 1601 }
1601 1602
1602 kunmap(map); 1603 kunmap(page);
1603 } 1604 }
1604 1605
1605 return 0; 1606 return 0;
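
The fuse_direct_io() change guards fuse_put_request() with !IS_ERR(req): the request pointer may carry an encoded errno rather than a real allocation (as the fuse_get_req()/PTR_ERR() usage in the dir.c hunk above shows), and only a real request may be released. A minimal sketch of that ERR_PTR idiom, with simplified stand-ins for the helpers from <linux/err.h>:

/* Simplified re-implementation for illustration; the kernel's versions
 * live in <linux/err.h>. A pointer either refers to a real object or
 * encodes a negative errno, and only real objects may be released. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct request { int id; };

static struct request *get_request(int fail)
{
        if (fail)
                return ERR_PTR(-EINTR);         /* no object was allocated */
        struct request *req = malloc(sizeof(*req));
        return req ? req : ERR_PTR(-ENOMEM);
}

static void put_request(struct request *req) { free(req); }

int main(void)
{
        struct request *req = get_request(1);

        /* The bug fixed above: releasing unconditionally would hand an
         * encoded errno to the free path. Guard it, as the patch does. */
        if (!IS_ERR(req))
                put_request(req);
        else
                printf("request failed: %ld\n", PTR_ERR(req));
        return 0;
}
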
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index eacd78a5d082..5b31f7741a8f 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
114 if (error) 114 if (error)
115 goto fail_unregister; 115 goto fail_unregister;
116 116
117 error = slow_work_register_user(); 117 error = slow_work_register_user(THIS_MODULE);
118 if (error) 118 if (error)
119 goto fail_slow; 119 goto fail_slow;
120 120
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
163 gfs2_unregister_debugfs(); 163 gfs2_unregister_debugfs();
164 unregister_filesystem(&gfs2_fs_type); 164 unregister_filesystem(&gfs2_fs_type);
165 unregister_filesystem(&gfs2meta_fs_type); 165 unregister_filesystem(&gfs2meta_fs_type);
166 slow_work_unregister_user(); 166 slow_work_unregister_user(THIS_MODULE);
167 167
168 kmem_cache_destroy(gfs2_quotad_cachep); 168 kmem_cache_destroy(gfs2_quotad_cachep);
169 kmem_cache_destroy(gfs2_rgrpd_cachep); 169 kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 59d2695509d3..09fa31965576 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -7,6 +7,7 @@
7 * of the GNU General Public License version 2. 7 * of the GNU General Public License version 2.
8 */ 8 */
9 9
10#include <linux/module.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/spinlock.h> 12#include <linux/spinlock.h>
12#include <linux/completion.h> 13#include <linux/completion.h>
@@ -593,6 +594,7 @@ fail:
593} 594}
594 595
595struct slow_work_ops gfs2_recover_ops = { 596struct slow_work_ops gfs2_recover_ops = {
597 .owner = THIS_MODULE,
596 .get_ref = gfs2_recover_get_ref, 598 .get_ref = gfs2_recover_get_ref,
597 .put_ref = gfs2_recover_put_ref, 599 .put_ref = gfs2_recover_put_ref,
598 .execute = gfs2_recover_work, 600 .execute = gfs2_recover_work,
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 7b17a14396ff..6c751106c2e5 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -254,7 +254,7 @@ int __generic_block_fiemap(struct inode *inode,
254 u64 len, get_block_t *get_block) 254 u64 len, get_block_t *get_block)
255{ 255{
256 struct buffer_head tmp; 256 struct buffer_head tmp;
257 unsigned int start_blk; 257 unsigned long long start_blk;
258 long long length = 0, map_len = 0; 258 long long length = 0, map_len = 0;
259 u64 logical = 0, phys = 0, size = 0; 259 u64 logical = 0, phys = 0, size = 0;
260 u32 flags = FIEMAP_EXTENT_MERGED; 260 u32 flags = FIEMAP_EXTENT_MERGED;
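
The fs/ioctl.c one-liner widens start_blk in __generic_block_fiemap() from unsigned int to unsigned long long, since a block index derived from a byte offset can exceed 32 bits on large files. A tiny sketch of the truncation being avoided, assuming 4 KiB blocks:

/* Illustrative sketch: a block index must be kept in a 64-bit variable. */
#include <stdio.h>

int main(void)
{
        unsigned long long offset = 20ULL << 40;        /* 20 TiB into a file  */
        unsigned int blkbits = 12;                      /* 4 KiB blocks        */

        unsigned int       narrow = offset >> blkbits;  /* silently truncated  */
        unsigned long long wide   = offset >> blkbits;  /* what the fix keeps  */

        printf("narrow start_blk = %u\n", narrow);      /* 1073741824 (wrong)  */
        printf("wide   start_blk = %llu\n", wide);      /* 5368709120 (right)  */
        return 0;
}
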
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index bd3c073b485d..4160afad6d00 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -73,6 +73,7 @@ EXPORT_SYMBOL(journal_errno);
73EXPORT_SYMBOL(journal_ack_err); 73EXPORT_SYMBOL(journal_ack_err);
74EXPORT_SYMBOL(journal_clear_err); 74EXPORT_SYMBOL(journal_clear_err);
75EXPORT_SYMBOL(log_wait_commit); 75EXPORT_SYMBOL(log_wait_commit);
76EXPORT_SYMBOL(log_start_commit);
76EXPORT_SYMBOL(journal_start_commit); 77EXPORT_SYMBOL(journal_start_commit);
77EXPORT_SYMBOL(journal_force_commit_nested); 78EXPORT_SYMBOL(journal_force_commit_nested);
78EXPORT_SYMBOL(journal_wipe); 79EXPORT_SYMBOL(journal_wipe);
@@ -756,6 +757,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
756 757
757 return journal; 758 return journal;
758out_err: 759out_err:
760 kfree(journal->j_wbuf);
759 kfree(journal); 761 kfree(journal);
760 return NULL; 762 return NULL;
761} 763}
@@ -820,6 +822,7 @@ journal_t * journal_init_inode (struct inode *inode)
820 822
821 return journal; 823 return journal;
822out_err: 824out_err:
825 kfree(journal->j_wbuf);
823 kfree(journal); 826 kfree(journal);
824 return NULL; 827 return NULL;
825} 828}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b0ab5219becb..fed85388ee86 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -913,6 +913,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
913 913
914 return journal; 914 return journal;
915out_err: 915out_err:
916 kfree(journal->j_wbuf);
916 jbd2_stats_proc_exit(journal); 917 jbd2_stats_proc_exit(journal);
917 kfree(journal); 918 kfree(journal);
918 return NULL; 919 return NULL;
@@ -986,6 +987,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
986 987
987 return journal; 988 return journal;
988out_err: 989out_err:
990 kfree(journal->j_wbuf);
989 jbd2_stats_proc_exit(journal); 991 jbd2_stats_proc_exit(journal);
990 kfree(journal); 992 kfree(journal);
991 return NULL; 993 return NULL;
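
The jbd and jbd2 hunks add kfree(journal->j_wbuf) to the out_err paths of journal_init_dev() and journal_init_inode(), so that the write-buffer array allocated before the failure point is unwound along with the journal structure itself. A generic sketch of that unwind pattern, with invented names:

/* Illustrative sketch: everything allocated before the failure point must
 * be freed, in reverse order, before the containing object is freed. */
#include <stdio.h>
#include <stdlib.h>

struct journal {
        void **wbuf;            /* stand-in for j_wbuf                  */
        /* ... many other fields ... */
};

static struct journal *journal_init(size_t nbufs, int fail_late)
{
        struct journal *journal = calloc(1, sizeof(*journal));
        if (!journal)
                return NULL;

        journal->wbuf = calloc(nbufs, sizeof(*journal->wbuf));
        if (!journal->wbuf)
                goto out_err;

        if (fail_late)          /* e.g. a later setup step fails        */
                goto out_err;

        return journal;

out_err:
        free(journal->wbuf);    /* the line the patch adds: without it,
                                 * the buffer array leaked on this path */
        free(journal);
        return NULL;
}

int main(void)
{
        if (!journal_init(64, 1))
                fprintf(stderr, "journal setup failed, nothing leaked\n");
        return 0;
}
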
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index cfe05c1966a5..3f39be1b0455 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -164,12 +164,15 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
164 164
165 /* XXX FIXME: Where a single physical node actually shows up in two 165 /* XXX FIXME: Where a single physical node actually shows up in two
166 frags, we read it twice. Don't do that. */ 166 frags, we read it twice. Don't do that. */
167 /* Now we're pointing at the first frag which overlaps our page */ 167 /* Now we're pointing at the first frag which overlaps our page
168 * (or perhaps is before it, if we've been asked to read off the
169 * end of the file). */
168 while(offset < end) { 170 while(offset < end) {
169 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); 171 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
170 if (unlikely(!frag || frag->ofs > offset)) { 172 if (unlikely(!frag || frag->ofs > offset ||
173 frag->ofs + frag->size <= offset)) {
171 uint32_t holesize = end - offset; 174 uint32_t holesize = end - offset;
172 if (frag) { 175 if (frag && frag->ofs > offset) {
173 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); 176 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
174 holesize = min(holesize, frag->ofs - offset); 177 holesize = min(holesize, frag->ofs - offset);
175 } 178 }
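
The jffs2_read_inode_range() change treats a fragment as covering an offset only when frag->ofs <= offset < frag->ofs + frag->size; the extra frag->ofs + frag->size <= offset test catches reads that run off the end of the file, as the updated comment says. A sketch of the interval test in isolation:

/* Illustrative sketch of the coverage test behind the new condition. */
#include <stdint.h>
#include <stdio.h>

struct frag { uint32_t ofs; uint32_t size; };

static int frag_covers(const struct frag *frag, uint32_t offset)
{
        return frag && frag->ofs <= offset && offset < frag->ofs + frag->size;
}

int main(void)
{
        struct frag f = { .ofs = 0, .size = 4096 };

        printf("offset 100  covered: %d\n", frag_covers(&f, 100));   /* 1 */
        printf("offset 4096 covered: %d\n", frag_covers(&f, 4096));  /* 0: read off the end */
        return 0;
}
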
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 70fad69eb959..fa588006588d 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
359 359
360 BUG_ON(!cookie); 360 BUG_ON(!cookie);
361 361
362 if (fscache_check_page_write(cookie, page)) {
363 if (!(gfp & __GFP_WAIT))
364 return 0;
365 fscache_wait_on_page_write(cookie, page);
366 }
367
368 if (PageFsCache(page)) { 362 if (PageFsCache(page)) {
369 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", 363 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
370 cookie, page, nfsi); 364 cookie, page, nfsi);
371 365
372 fscache_uncache_page(cookie, page); 366 if (!fscache_maybe_release_page(cookie, page, gfp))
367 return 0;
368
373 nfs_add_fscache_stats(page->mapping->host, 369 nfs_add_fscache_stats(page->mapping->host,
374 NFSIOS_FSCACHE_PAGES_UNCACHED, 1); 370 NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
375 } 371 }
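
nfs_fscache_release_page() now delegates to fscache_maybe_release_page() instead of open-coding the fscache_check_page_write()/__GFP_WAIT/fscache_wait_on_page_write() dance it used to carry. Based only on the logic visible in the removed lines, the helper's shape is roughly the following; this is an illustrative user-space analogue under that assumption, not the fscache implementation:

/* Sketch: refuse to release a page only when it is still being written to
 * the cache and the caller is not allowed to sleep. */
#include <stdbool.h>
#include <stdio.h>

struct page { bool write_in_progress; bool cached; };

#define GFP_CAN_WAIT 0x1

static void wait_for_write(struct page *p) { p->write_in_progress = false; }

static bool maybe_release_page(struct page *p, unsigned gfp)
{
        if (p->write_in_progress) {
                if (!(gfp & GFP_CAN_WAIT))
                        return false;            /* caller must retry later */
                wait_for_write(p);
        }
        p->cached = false;                       /* drop it from the cache */
        return true;
}

int main(void)
{
        struct page p = { .write_in_progress = true, .cached = true };

        printf("atomic caller:   %d\n", maybe_release_page(&p, 0));             /* 0 */
        printf("sleeping caller: %d\n", maybe_release_page(&p, GFP_CAN_WAIT));  /* 1 */
        return 0;
}
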
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ff37454fa783..741a562177fc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2767,7 +2767,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2767 .pages = &page, 2767 .pages = &page,
2768 .pgbase = 0, 2768 .pgbase = 0,
2769 .count = count, 2769 .count = count,
2770 .bitmask = NFS_SERVER(dentry->d_inode)->cache_consistency_bitmask, 2770 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
2771 }; 2771 };
2772 struct nfs4_readdir_res res; 2772 struct nfs4_readdir_res res;
2773 struct rpc_message msg = { 2773 struct rpc_message msg = {
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index edf926e1062f..d0a2ce1b4324 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -958,7 +958,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
958 p1 = encode_entry_baggage(cd, p1, name, namlen, ino); 958 p1 = encode_entry_baggage(cd, p1, name, namlen, ino);
959 959
960 if (plus) 960 if (plus)
961 p = encode_entryplus_baggage(cd, p1, name, namlen); 961 p1 = encode_entryplus_baggage(cd, p1, name, namlen);
962 962
963 /* determine entry word length and lengths to go in pages */ 963 /* determine entry word length and lengths to go in pages */
964 num_entry_words = p1 - tmp; 964 num_entry_words = p1 - tmp;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 5941958f1e47..84c25382f8e3 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -87,6 +87,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
87 brelse(bh); 87 brelse(bh);
88 BUG(); 88 BUG();
89 } 89 }
90 memset(bh->b_data, 0, 1 << inode->i_blkbits);
90 bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev; 91 bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
91 bh->b_blocknr = blocknr; 92 bh->b_blocknr = blocknr;
92 set_buffer_mapped(bh); 93 set_buffer_mapped(bh);
@@ -276,8 +277,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
276 "invalid oldkey %lld (newkey=%lld)", 277 "invalid oldkey %lld (newkey=%lld)",
277 (unsigned long long)oldkey, 278 (unsigned long long)oldkey,
278 (unsigned long long)newkey); 279 (unsigned long long)newkey);
279 if (!test_set_buffer_dirty(obh) && TestSetPageDirty(opage)) 280 nilfs_btnode_mark_dirty(obh);
280 BUG();
281 281
282 spin_lock_irq(&btnc->tree_lock); 282 spin_lock_irq(&btnc->tree_lock);
283 radix_tree_delete(&btnc->page_tree, oldkey); 283 radix_tree_delete(&btnc->page_tree, oldkey);
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 1c6cfb59128d..3f5d5d06f53c 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -871,7 +871,6 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
871 * exclusive with a new mount job. Though it doesn't cover 871 * exclusive with a new mount job. Though it doesn't cover
872 * umount, it's enough for the purpose. 872 * umount, it's enough for the purpose.
873 */ 873 */
874 mutex_lock(&nilfs->ns_mount_mutex);
875 if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) { 874 if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
876 /* Current implementation does not have to protect 875 /* Current implementation does not have to protect
877 plain read-only mounts since they are exclusive 876 plain read-only mounts since they are exclusive
@@ -880,7 +879,6 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
880 ret = -EBUSY; 879 ret = -EBUSY;
881 } else 880 } else
882 ret = nilfs_cpfile_clear_snapshot(cpfile, cno); 881 ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
883 mutex_unlock(&nilfs->ns_mount_mutex);
884 return ret; 882 return ret;
885 case NILFS_SNAPSHOT: 883 case NILFS_SNAPSHOT:
886 return nilfs_cpfile_set_snapshot(cpfile, cno); 884 return nilfs_cpfile_set_snapshot(cpfile, cno);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 5040220c3732..2a0a5a3ac134 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -664,7 +664,6 @@ int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
664 int err; 664 int err;
665 665
666 spin_lock(&sbi->s_inode_lock); 666 spin_lock(&sbi->s_inode_lock);
667 /* Caller of this function MUST lock s_inode_lock */
668 if (ii->i_bh == NULL) { 667 if (ii->i_bh == NULL) {
669 spin_unlock(&sbi->s_inode_lock); 668 spin_unlock(&sbi->s_inode_lock);
670 err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino, 669 err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 6572ea4bc4df..f6af76042d80 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -99,7 +99,8 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
99static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, 99static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
100 unsigned int cmd, void __user *argp) 100 unsigned int cmd, void __user *argp)
101{ 101{
102 struct inode *cpfile = NILFS_SB(inode->i_sb)->s_nilfs->ns_cpfile; 102 struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
103 struct inode *cpfile = nilfs->ns_cpfile;
103 struct nilfs_transaction_info ti; 104 struct nilfs_transaction_info ti;
104 struct nilfs_cpmode cpmode; 105 struct nilfs_cpmode cpmode;
105 int ret; 106 int ret;
@@ -109,14 +110,17 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
109 if (copy_from_user(&cpmode, argp, sizeof(cpmode))) 110 if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
110 return -EFAULT; 111 return -EFAULT;
111 112
113 mutex_lock(&nilfs->ns_mount_mutex);
112 nilfs_transaction_begin(inode->i_sb, &ti, 0); 114 nilfs_transaction_begin(inode->i_sb, &ti, 0);
113 ret = nilfs_cpfile_change_cpmode( 115 ret = nilfs_cpfile_change_cpmode(
114 cpfile, cpmode.cm_cno, cpmode.cm_mode); 116 cpfile, cpmode.cm_cno, cpmode.cm_mode);
115 if (unlikely(ret < 0)) { 117 if (unlikely(ret < 0)) {
116 nilfs_transaction_abort(inode->i_sb); 118 nilfs_transaction_abort(inode->i_sb);
119 mutex_unlock(&nilfs->ns_mount_mutex);
117 return ret; 120 return ret;
118 } 121 }
119 nilfs_transaction_commit(inode->i_sb); /* never fails */ 122 nilfs_transaction_commit(inode->i_sb); /* never fails */
123 mutex_unlock(&nilfs->ns_mount_mutex);
120 return ret; 124 return ret;
121} 125}
122 126
@@ -297,7 +301,18 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
297 (unsigned long long)vdesc->vd_vblocknr); 301 (unsigned long long)vdesc->vd_vblocknr);
298 return ret; 302 return ret;
299 } 303 }
300 bh->b_private = vdesc; 304 if (unlikely(!list_empty(&bh->b_assoc_buffers))) {
305 printk(KERN_CRIT "%s: conflicting %s buffer: ino=%llu, "
306 "cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu\n",
307 __func__, vdesc->vd_flags ? "node" : "data",
308 (unsigned long long)vdesc->vd_ino,
309 (unsigned long long)vdesc->vd_cno,
310 (unsigned long long)vdesc->vd_offset,
311 (unsigned long long)vdesc->vd_blocknr,
312 (unsigned long long)vdesc->vd_vblocknr);
313 brelse(bh);
314 return -EEXIST;
315 }
301 list_add_tail(&bh->b_assoc_buffers, buffers); 316 list_add_tail(&bh->b_assoc_buffers, buffers);
302 return 0; 317 return 0;
303} 318}
@@ -335,24 +350,10 @@ static int nilfs_ioctl_move_blocks(struct the_nilfs *nilfs,
335 list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { 350 list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) {
336 ret = nilfs_gccache_wait_and_mark_dirty(bh); 351 ret = nilfs_gccache_wait_and_mark_dirty(bh);
337 if (unlikely(ret < 0)) { 352 if (unlikely(ret < 0)) {
338 if (ret == -EEXIST) { 353 WARN_ON(ret == -EEXIST);
339 vdesc = bh->b_private;
340 printk(KERN_CRIT
341 "%s: conflicting %s buffer: "
342 "ino=%llu, cno=%llu, offset=%llu, "
343 "blocknr=%llu, vblocknr=%llu\n",
344 __func__,
345 vdesc->vd_flags ? "node" : "data",
346 (unsigned long long)vdesc->vd_ino,
347 (unsigned long long)vdesc->vd_cno,
348 (unsigned long long)vdesc->vd_offset,
349 (unsigned long long)vdesc->vd_blocknr,
350 (unsigned long long)vdesc->vd_vblocknr);
351 }
352 goto failed; 354 goto failed;
353 } 355 }
354 list_del_init(&bh->b_assoc_buffers); 356 list_del_init(&bh->b_assoc_buffers);
355 bh->b_private = NULL;
356 brelse(bh); 357 brelse(bh);
357 } 358 }
358 return nmembs; 359 return nmembs;
@@ -360,7 +361,6 @@ static int nilfs_ioctl_move_blocks(struct the_nilfs *nilfs,
360 failed: 361 failed:
361 list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { 362 list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) {
362 list_del_init(&bh->b_assoc_buffers); 363 list_del_init(&bh->b_assoc_buffers);
363 bh->b_private = NULL;
364 brelse(bh); 364 brelse(bh);
365 } 365 }
366 return ret; 366 return ret;
@@ -471,7 +471,6 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
471 return 0; 471 return 0;
472 472
473 failed: 473 failed:
474 nilfs_remove_all_gcinode(nilfs);
475 printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n", 474 printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n",
476 msg, ret); 475 msg, ret);
477 return ret; 476 return ret;
@@ -560,6 +559,8 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
560 else 559 else
561 ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); 560 ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
562 561
562 if (ret < 0)
563 nilfs_remove_all_gcinode(nilfs);
563 clear_nilfs_gc_running(nilfs); 564 clear_nilfs_gc_running(nilfs);
564 565
565 out_free: 566 out_free:
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 683df89dbae5..6eff66a070d5 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2468,17 +2468,22 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
2468 /* Clear requests (even when the construction failed) */ 2468 /* Clear requests (even when the construction failed) */
2469 spin_lock(&sci->sc_state_lock); 2469 spin_lock(&sci->sc_state_lock);
2470 2470
2471 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2472
2473 if (req->mode == SC_LSEG_SR) { 2471 if (req->mode == SC_LSEG_SR) {
2472 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2474 sci->sc_seq_done = req->seq_accepted; 2473 sci->sc_seq_done = req->seq_accepted;
2475 nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err); 2474 nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err);
2476 sci->sc_flush_request = 0; 2475 sci->sc_flush_request = 0;
2477 } else if (req->mode == SC_FLUSH_FILE) 2476 } else {
2478 sci->sc_flush_request &= ~FLUSH_FILE_BIT; 2477 if (req->mode == SC_FLUSH_FILE)
2479 else if (req->mode == SC_FLUSH_DAT) 2478 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2480 sci->sc_flush_request &= ~FLUSH_DAT_BIT; 2479 else if (req->mode == SC_FLUSH_DAT)
2480 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2481 2481
2482 /* re-enable timer if checkpoint creation was not done */
2483 if (sci->sc_timer && (sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2484 time_before(jiffies, sci->sc_timer->expires))
2485 add_timer(sci->sc_timer);
2486 }
2482 spin_unlock(&sci->sc_state_lock); 2487 spin_unlock(&sci->sc_state_lock);
2483} 2488}
2484 2489
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 89fc8ee1f5a5..de059f490586 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1712,7 +1712,8 @@ int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
1712 struct super_block *sb = inode->i_sb; 1712 struct super_block *sb = inode->i_sb;
1713 1713
1714 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) || 1714 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
1715 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) 1715 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
1716 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1716 return 0; 1717 return 0;
1717 1718
1718 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; 1719 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index eae404602424..d963d8638709 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -35,12 +35,7 @@
35#include <linux/kref.h> 35#include <linux/kref.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/lockdep.h> 37#include <linux/lockdep.h>
38#ifndef CONFIG_OCFS2_COMPAT_JBD 38#include <linux/jbd2.h>
39# include <linux/jbd2.h>
40#else
41# include <linux/jbd.h>
42# include "ocfs2_jbd_compat.h"
43#endif
44 39
45/* For union ocfs2_dlm_lksb */ 40/* For union ocfs2_dlm_lksb */
46#include "stackglue.h" 41#include "stackglue.h"
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 60287fc56bcb..3a0df7a1b810 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3743,6 +3743,9 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
3743 goto out; 3743 goto out;
3744 } 3744 }
3745 3745
3746 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
3747 goto attach_xattr;
3748
3746 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); 3749 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3747 3750
3748 size = i_size_read(inode); 3751 size = i_size_read(inode);
@@ -3769,6 +3772,7 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
3769 cpos += num_clusters; 3772 cpos += num_clusters;
3770 } 3773 }
3771 3774
3775attach_xattr:
3772 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 3776 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
3773 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, 3777 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
3774 &ref_tree->rf_ci, 3778 &ref_tree->rf_ci,
@@ -3858,6 +3862,49 @@ out:
3858 return ret; 3862 return ret;
3859} 3863}
3860 3864
3865static int ocfs2_duplicate_inline_data(struct inode *s_inode,
3866 struct buffer_head *s_bh,
3867 struct inode *t_inode,
3868 struct buffer_head *t_bh)
3869{
3870 int ret;
3871 handle_t *handle;
3872 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3873 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3874 struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
3875
3876 BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
3877
3878 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3879 if (IS_ERR(handle)) {
3880 ret = PTR_ERR(handle);
3881 mlog_errno(ret);
3882 goto out;
3883 }
3884
3885 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3886 OCFS2_JOURNAL_ACCESS_WRITE);
3887 if (ret) {
3888 mlog_errno(ret);
3889 goto out_commit;
3890 }
3891
3892 t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
3893 memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
3894 le16_to_cpu(s_di->id2.i_data.id_count));
3895 spin_lock(&OCFS2_I(t_inode)->ip_lock);
3896 OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
3897 t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
3898 spin_unlock(&OCFS2_I(t_inode)->ip_lock);
3899
3900 ocfs2_journal_dirty(handle, t_bh);
3901
3902out_commit:
3903 ocfs2_commit_trans(osb, handle);
3904out:
3905 return ret;
3906}
3907
3861static int ocfs2_duplicate_extent_list(struct inode *s_inode, 3908static int ocfs2_duplicate_extent_list(struct inode *s_inode,
3862 struct inode *t_inode, 3909 struct inode *t_inode,
3863 struct buffer_head *t_bh, 3910 struct buffer_head *t_bh,
@@ -3997,6 +4044,14 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
3997 goto out; 4044 goto out;
3998 } 4045 }
3999 4046
4047 if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4048 ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
4049 t_inode, t_bh);
4050 if (ret)
4051 mlog_errno(ret);
4052 goto out;
4053 }
4054
4000 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 4055 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
4001 1, &ref_tree, &ref_root_bh); 4056 1, &ref_tree, &ref_root_bh);
4002 if (ret) { 4057 if (ret) {
@@ -4013,10 +4068,6 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
4013 goto out_unlock_refcount; 4068 goto out_unlock_refcount;
4014 } 4069 }
4015 4070
4016 ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve);
4017 if (ret)
4018 mlog_errno(ret);
4019
4020out_unlock_refcount: 4071out_unlock_refcount:
4021 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4072 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4022 brelse(ref_root_bh); 4073 brelse(ref_root_bh);
@@ -4068,9 +4119,17 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
4068 ret = ocfs2_reflink_xattrs(inode, old_bh, 4119 ret = ocfs2_reflink_xattrs(inode, old_bh,
4069 new_inode, new_bh, 4120 new_inode, new_bh,
4070 preserve); 4121 preserve);
4071 if (ret) 4122 if (ret) {
4072 mlog_errno(ret); 4123 mlog_errno(ret);
4124 goto inode_unlock;
4125 }
4073 } 4126 }
4127
4128 ret = ocfs2_complete_reflink(inode, old_bh,
4129 new_inode, new_bh, preserve);
4130 if (ret)
4131 mlog_errno(ret);
4132
4074inode_unlock: 4133inode_unlock:
4075 ocfs2_inode_unlock(new_inode, 1); 4134 ocfs2_inode_unlock(new_inode, 1);
4076 brelse(new_bh); 4135 brelse(new_bh);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c0e48aeebb1c..14f47d2bfe02 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -773,18 +773,20 @@ static int ocfs2_sb_probe(struct super_block *sb,
773 if (tmpstat < 0) { 773 if (tmpstat < 0) {
774 status = tmpstat; 774 status = tmpstat;
775 mlog_errno(status); 775 mlog_errno(status);
776 goto bail; 776 break;
777 } 777 }
778 di = (struct ocfs2_dinode *) (*bh)->b_data; 778 di = (struct ocfs2_dinode *) (*bh)->b_data;
779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats)); 779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
780 spin_lock_init(&stats->b_lock); 780 spin_lock_init(&stats->b_lock);
781 status = ocfs2_verify_volume(di, *bh, blksize, stats); 781 tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats);
782 if (status >= 0) 782 if (tmpstat < 0) {
783 goto bail; 783 brelse(*bh);
784 brelse(*bh); 784 *bh = NULL;
785 *bh = NULL; 785 }
786 if (status != -EAGAIN) 786 if (tmpstat != -EAGAIN) {
787 status = tmpstat;
787 break; 788 break;
789 }
788 } 790 }
789 791
790bail: 792bail:
@@ -1645,6 +1647,10 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
1645 buf->f_bavail = buf->f_bfree; 1647 buf->f_bavail = buf->f_bfree;
1646 buf->f_files = numbits; 1648 buf->f_files = numbits;
1647 buf->f_ffree = freebits; 1649 buf->f_ffree = freebits;
1650 buf->f_fsid.val[0] = crc32_le(0, osb->uuid_str, OCFS2_VOL_UUID_LEN)
1651 & 0xFFFFFFFFUL;
1652 buf->f_fsid.val[1] = crc32_le(0, osb->uuid_str + OCFS2_VOL_UUID_LEN,
1653 OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL;
1648 1654
1649 brelse(bh); 1655 brelse(bh);
1650 1656
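
The ocfs2_statfs() hunk fills f_fsid with crc32_le() over the two OCFS2_VOL_UUID_LEN-sized halves of osb->uuid_str, giving a stable 64-bit filesystem id without exposing the raw UUID string. A user-space sketch of the same structure using zlib's crc32() (note zlib's crc32() applies an initial/final XOR that the kernel's crc32_le() does not, so the exact values differ; only the shape matches):

/* Illustrative sketch; build with -lz. The 16-byte half length mirrors the
 * OCFS2_VOL_UUID_LEN used above. */
#include <stdio.h>
#include <zlib.h>

int main(void)
{
        const unsigned char uuid_str[] = "0123456789ABCDEF0123456789ABCDEF";
        unsigned long half = 16;

        unsigned long fsid0 = crc32(0L, uuid_str, half) & 0xFFFFFFFFUL;
        unsigned long fsid1 = crc32(0L, uuid_str + half, half) & 0xFFFFFFFFUL;

        printf("f_fsid = %08lx:%08lx\n", fsid0, fsid1);
        return 0;
}
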
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index b6284f235d2f..c61369342a27 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -53,11 +53,6 @@
53#include <linux/highmem.h> 53#include <linux/highmem.h>
54#include <linux/buffer_head.h> 54#include <linux/buffer_head.h>
55#include <linux/rbtree.h> 55#include <linux/rbtree.h>
56#ifndef CONFIG_OCFS2_COMPAT_JBD
57# include <linux/jbd2.h>
58#else
59# include <linux/jbd.h>
60#endif
61 56
62#define MLOG_MASK_PREFIX ML_UPTODATE 57#define MLOG_MASK_PREFIX ML_UPTODATE
63 58
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 07f77a7945c3..822c2d506518 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -571,7 +571,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
571 rsslim, 571 rsslim,
572 mm ? mm->start_code : 0, 572 mm ? mm->start_code : 0,
573 mm ? mm->end_code : 0, 573 mm ? mm->end_code : 0,
574 (permitted) ? task->stack_start : 0, 574 (permitted && mm) ? task->stack_start : 0,
575 esp, 575 esp,
576 eip, 576 eip,
577 /* The signal information here is obsolete. 577 /* The signal information here is obsolete.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 837469a96598..af643b5aefe8 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2597,8 +2597,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
2597 name.len = snprintf(buf, sizeof(buf), "%d", pid); 2597 name.len = snprintf(buf, sizeof(buf), "%d", pid);
2598 dentry = d_hash_and_lookup(mnt->mnt_root, &name); 2598 dentry = d_hash_and_lookup(mnt->mnt_root, &name);
2599 if (dentry) { 2599 if (dentry) {
2600 if (!(current->flags & PF_EXITING)) 2600 shrink_dcache_parent(dentry);
2601 shrink_dcache_parent(dentry);
2602 d_drop(dentry); 2601 d_drop(dentry);
2603 dput(dentry); 2602 dput(dentry);
2604 } 2603 }
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 5fad489ce5bc..e0201837d244 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -21,6 +21,7 @@
21#include <linux/completion.h> 21#include <linux/completion.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/security.h>
24#include "sysfs.h" 25#include "sysfs.h"
25 26
26DEFINE_MUTEX(sysfs_mutex); 27DEFINE_MUTEX(sysfs_mutex);
@@ -285,6 +286,9 @@ void release_sysfs_dirent(struct sysfs_dirent * sd)
285 sysfs_put(sd->s_symlink.target_sd); 286 sysfs_put(sd->s_symlink.target_sd);
286 if (sysfs_type(sd) & SYSFS_COPY_NAME) 287 if (sysfs_type(sd) & SYSFS_COPY_NAME)
287 kfree(sd->s_name); 288 kfree(sd->s_name);
289 if (sd->s_iattr && sd->s_iattr->ia_secdata)
290 security_release_secctx(sd->s_iattr->ia_secdata,
291 sd->s_iattr->ia_secdata_len);
288 kfree(sd->s_iattr); 292 kfree(sd->s_iattr);
289 sysfs_free_ino(sd->s_ino); 293 sysfs_free_ino(sd->s_ino);
290 kmem_cache_free(sysfs_dir_cachep, sd); 294 kmem_cache_free(sysfs_dir_cachep, sd);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1099395d7d6c..fb17f8226b09 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1980,7 +1980,7 @@ xlog_recover_do_reg_buffer(
1980 "XFS: NULL dquot in %s.", __func__); 1980 "XFS: NULL dquot in %s.", __func__);
1981 goto next; 1981 goto next;
1982 } 1982 }
1983 if (item->ri_buf[i].i_len < sizeof(xfs_dqblk_t)) { 1983 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1984 cmn_err(CE_ALERT, 1984 cmn_err(CE_ALERT,
1985 "XFS: dquot too small (%d) in %s.", 1985 "XFS: dquot too small (%d) in %s.",
1986 item->ri_buf[i].i_len, __func__); 1986 item->ri_buf[i].i_len, __func__);
@@ -2635,7 +2635,7 @@ xlog_recover_do_dquot_trans(
2635 "XFS: NULL dquot in %s.", __func__); 2635 "XFS: NULL dquot in %s.", __func__);
2636 return XFS_ERROR(EIO); 2636 return XFS_ERROR(EIO);
2637 } 2637 }
2638 if (item->ri_buf[1].i_len < sizeof(xfs_dqblk_t)) { 2638 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2639 cmn_err(CE_ALERT, 2639 cmn_err(CE_ALERT,
2640 "XFS: dquot too small (%d) in %s.", 2640 "XFS: dquot too small (%d) in %s.",
2641 item->ri_buf[1].i_len, __func__); 2641 item->ri_buf[1].i_len, __func__);
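
Both xfs_log_recover.c hunks lower the minimum acceptable region length from sizeof(xfs_dqblk_t) to sizeof(xfs_disk_dquot_t), i.e. the length check is now against the core dquot record rather than the larger padded block. A generic sketch of that validate-before-use pattern, with invented stand-in types:

/* Illustrative sketch: require only the size of the structure that will
 * actually be read before interpreting a logged region. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct disk_dquot { unsigned int id; unsigned long long blk_hardlimit; }; /* core record   */
struct dqblk      { struct disk_dquot core; char padding[32]; };          /* padded on-disk */

static int recover_dquot(const void *buf, size_t len)
{
        if (len < sizeof(struct disk_dquot)) {  /* the relaxed check */
                fprintf(stderr, "dquot too small (%zu)\n", len);
                return -1;
        }
        struct disk_dquot dq;
        memcpy(&dq, buf, sizeof(dq));
        printf("recovered dquot id=%u\n", dq.id);
        return 0;
}

int main(void)
{
        struct disk_dquot core = { .id = 7, .blk_hardlimit = 1024 };

        /* A region holding just the core record is accepted; a check
         * against the padded struct would have rejected it. */
        return recover_dquot(&core, sizeof(core));
}
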
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index f31271c30de9..2ffc570679be 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -467,6 +467,7 @@ xfs_trans_ail_update(
467{ 467{
468 xfs_log_item_t *dlip = NULL; 468 xfs_log_item_t *dlip = NULL;
469 xfs_log_item_t *mlip; /* ptr to minimum lip */ 469 xfs_log_item_t *mlip; /* ptr to minimum lip */
470 xfs_lsn_t tail_lsn;
470 471
471 mlip = xfs_ail_min(ailp); 472 mlip = xfs_ail_min(ailp);
472 473
@@ -483,8 +484,16 @@ xfs_trans_ail_update(
483 484
484 if (mlip == dlip) { 485 if (mlip == dlip) {
485 mlip = xfs_ail_min(ailp); 486 mlip = xfs_ail_min(ailp);
487 /*
488 * It is not safe to access mlip after the AIL lock is
489 * dropped, so we must get a copy of li_lsn before we do
490 * so. This is especially important on 32-bit platforms
491 * where accessing and updating 64-bit values like li_lsn
492 * is not atomic.
493 */
494 tail_lsn = mlip->li_lsn;
486 spin_unlock(&ailp->xa_lock); 495 spin_unlock(&ailp->xa_lock);
487 xfs_log_move_tail(ailp->xa_mount, mlip->li_lsn); 496 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
488 } else { 497 } else {
489 spin_unlock(&ailp->xa_lock); 498 spin_unlock(&ailp->xa_lock);
490 } 499 }
@@ -514,6 +523,7 @@ xfs_trans_ail_delete(
514{ 523{
515 xfs_log_item_t *dlip; 524 xfs_log_item_t *dlip;
516 xfs_log_item_t *mlip; 525 xfs_log_item_t *mlip;
526 xfs_lsn_t tail_lsn;
517 527
518 if (lip->li_flags & XFS_LI_IN_AIL) { 528 if (lip->li_flags & XFS_LI_IN_AIL) {
519 mlip = xfs_ail_min(ailp); 529 mlip = xfs_ail_min(ailp);
@@ -527,9 +537,16 @@ xfs_trans_ail_delete(
527 537
528 if (mlip == dlip) { 538 if (mlip == dlip) {
529 mlip = xfs_ail_min(ailp); 539 mlip = xfs_ail_min(ailp);
540 /*
541 * It is not safe to access mlip after the AIL lock
542 * is dropped, so we must get a copy of li_lsn
543 * before we do so. This is especially important
544 * on 32-bit platforms where accessing and updating
545 * 64-bit values like li_lsn is not atomic.
546 */
547 tail_lsn = mlip ? mlip->li_lsn : 0;
530 spin_unlock(&ailp->xa_lock); 548 spin_unlock(&ailp->xa_lock);
531 xfs_log_move_tail(ailp->xa_mount, 549 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
532 (mlip ? mlip->li_lsn : 0));
533 } else { 550 } else {
534 spin_unlock(&ailp->xa_lock); 551 spin_unlock(&ailp->xa_lock);
535 } 552 }
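
Both xfs_trans_ail.c hunks apply the fix their new comments describe: copy mlip->li_lsn into tail_lsn while the AIL lock is still held, drop the lock, and only then call xfs_log_move_tail() with the copy, since reading a 64-bit li_lsn without the lock is not atomic on 32-bit platforms. A user-space sketch of that copy-before-unlock pattern, with invented names:

/* Illustrative sketch with pthreads; not XFS code. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t min_lsn = 0x100000001ULL;       /* protected by ail_lock */

static void log_move_tail(uint64_t lsn)         /* may sleep; called unlocked */
{
        printf("moving log tail to %llx\n", (unsigned long long)lsn);
}

static void ail_update(void)
{
        pthread_mutex_lock(&ail_lock);
        uint64_t tail_lsn = min_lsn;            /* copy while still locked */
        pthread_mutex_unlock(&ail_lock);

        /* Using min_lsn directly here would race with concurrent updates
         * and, on 32-bit targets, could observe a torn 64-bit value. */
        log_move_tail(tail_lsn);
}

int main(void)
{
        ail_update();
        return 0;
}
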