Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/cache.c | 14
-rw-r--r--  fs/afs/file.c | 15
-rw-r--r--  fs/aio.c | 62
-rw-r--r--  fs/binfmt_elf.c | 2
-rw-r--r--  fs/bio.c | 14
-rw-r--r--  fs/block_dev.c | 12
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/cachefiles/interface.c | 32
-rw-r--r--  fs/cachefiles/namei.c | 187
-rw-r--r--  fs/cachefiles/rdwr.c | 130
-rw-r--r--  fs/cifs/CHANGES | 9
-rw-r--r--  fs/cifs/README | 2
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifspdu.h | 2
-rw-r--r--  fs/cifs/dir.c | 8
-rw-r--r--  fs/cifs/inode.c | 4
-rw-r--r--  fs/cifs/smbdes.c | 2
-rw-r--r--  fs/coda/sysctl.c | 10
-rw-r--r--  fs/compat_ioctl.c | 747
-rw-r--r--  fs/debugfs/inode.c | 6
-rw-r--r--  fs/direct-io.c | 10
-rw-r--r--  fs/dlm/plock.c | 2
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/ext3/inode.c | 2
-rw-r--r--  fs/ext4/inode.c | 8
-rw-r--r--  fs/ext4/mballoc.c | 2
-rw-r--r--  fs/file_table.c | 2
-rw-r--r--  fs/fs-writeback.c | 28
-rw-r--r--  fs/fscache/Kconfig | 7
-rw-r--r--  fs/fscache/Makefile | 1
-rw-r--r--  fs/fscache/cache.c | 5
-rw-r--r--  fs/fscache/cookie.c | 26
-rw-r--r--  fs/fscache/internal.h | 56
-rw-r--r--  fs/fscache/main.c | 6
-rw-r--r--  fs/fscache/object-list.c | 432
-rw-r--r--  fs/fscache/object.c | 104
-rw-r--r--  fs/fscache/operation.c | 120
-rw-r--r--  fs/fscache/page.c | 273
-rw-r--r--  fs/fscache/proc.c | 13
-rw-r--r--  fs/fscache/stats.c | 94
-rw-r--r--  fs/fuse/dir.c | 3
-rw-r--r--  fs/gfs2/Kconfig | 2
-rw-r--r--  fs/gfs2/acl.c | 357
-rw-r--r--  fs/gfs2/acl.h | 24
-rw-r--r--  fs/gfs2/aops.c | 20
-rw-r--r--  fs/gfs2/dir.c | 34
-rw-r--r--  fs/gfs2/glock.c | 31
-rw-r--r--  fs/gfs2/glock.h | 9
-rw-r--r--  fs/gfs2/glops.c | 5
-rw-r--r--  fs/gfs2/incore.h | 5
-rw-r--r--  fs/gfs2/inode.c | 4
-rw-r--r--  fs/gfs2/log.c | 2
-rw-r--r--  fs/gfs2/lops.c | 4
-rw-r--r--  fs/gfs2/main.c | 4
-rw-r--r--  fs/gfs2/ops_fstype.c | 154
-rw-r--r--  fs/gfs2/quota.c | 393
-rw-r--r--  fs/gfs2/quota.h | 5
-rw-r--r--  fs/gfs2/recovery.c | 4
-rw-r--r--  fs/gfs2/rgrp.c | 14
-rw-r--r--  fs/gfs2/super.c | 110
-rw-r--r--  fs/gfs2/super.h | 4
-rw-r--r--  fs/gfs2/sys.c | 14
-rw-r--r--  fs/gfs2/xattr.c | 74
-rw-r--r--  fs/gfs2/xattr.h | 8
-rw-r--r--  fs/inode.c | 10
-rw-r--r--  fs/jffs2/compr.c | 2
-rw-r--r--  fs/jffs2/read.c | 9
-rw-r--r--  fs/jffs2/readinode.c | 2
-rw-r--r--  fs/jffs2/xattr.c | 2
-rw-r--r--  fs/jfs/jfs_dmap.c | 4
-rw-r--r--  fs/lockd/svc.c | 26
-rw-r--r--  fs/namespace.c | 20
-rw-r--r--  fs/ncpfs/ioctl.c | 2
-rw-r--r--  fs/nfs/fscache.c | 10
-rw-r--r--  fs/nfs/sysctl.c | 22
-rw-r--r--  fs/nfs/write.c | 2
-rw-r--r--  fs/notify/inotify/inotify_user.c | 18
-rw-r--r--  fs/ntfs/compress.c | 2
-rw-r--r--  fs/ntfs/file.c | 4
-rw-r--r--  fs/ntfs/logfile.c | 2
-rw-r--r--  fs/ntfs/sysctl.c | 4
-rw-r--r--  fs/ocfs2/alloc.c | 2
-rw-r--r--  fs/ocfs2/blockcheck.c | 2
-rw-r--r--  fs/ocfs2/cluster/netdebug.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 2
-rw-r--r--  fs/ocfs2/file.c | 3
-rw-r--r--  fs/ocfs2/journal.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 7
-rw-r--r--  fs/ocfs2/refcounttree.c | 71
-rw-r--r--  fs/ocfs2/stackglue.c | 15
-rw-r--r--  fs/ocfs2/super.c | 20
-rw-r--r--  fs/ocfs2/uptodate.c | 5
-rw-r--r--  fs/omfs/bitmap.c | 2
-rw-r--r--  fs/open.c | 27
-rw-r--r--  fs/partitions/check.c | 12
-rw-r--r--  fs/partitions/efi.c | 30
-rw-r--r--  fs/partitions/efi.h | 8
-rw-r--r--  fs/proc/array.c | 23
-rw-r--r--  fs/proc/proc_sysctl.c | 4
-rw-r--r--  fs/proc/stat.c | 19
-rw-r--r--  fs/qnx4/bitmap.c | 2
-rw-r--r--  fs/qnx4/dir.c | 6
-rw-r--r--  fs/qnx4/inode.c | 26
-rw-r--r--  fs/qnx4/namei.c | 6
-rw-r--r--  fs/quota/Kconfig | 2
-rw-r--r--  fs/quota/dquot.c | 128
-rw-r--r--  fs/quota/quota.c | 93
-rw-r--r--  fs/read_write.c | 2
-rw-r--r--  fs/reiserfs/Makefile | 2
-rw-r--r--  fs/reiserfs/bitmap.c | 4
-rw-r--r--  fs/reiserfs/dir.c | 10
-rw-r--r--  fs/reiserfs/do_balan.c | 17
-rw-r--r--  fs/reiserfs/file.c | 2
-rw-r--r--  fs/reiserfs/fix_node.c | 21
-rw-r--r--  fs/reiserfs/inode.c | 97
-rw-r--r--  fs/reiserfs/ioctl.c | 77
-rw-r--r--  fs/reiserfs/journal.c | 130
-rw-r--r--  fs/reiserfs/lock.c | 88
-rw-r--r--  fs/reiserfs/namei.c | 20
-rw-r--r--  fs/reiserfs/prints.c | 4
-rw-r--r--  fs/reiserfs/resize.c | 2
-rw-r--r--  fs/reiserfs/stree.c | 53
-rw-r--r--  fs/reiserfs/super.c | 52
-rw-r--r--  fs/reiserfs/xattr.c | 6
-rw-r--r--  fs/splice.c | 24
-rw-r--r--  fs/ubifs/recovery.c | 2
-rw-r--r--  fs/xattr_acl.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.c | 62
-rw-r--r--  fs/xfs/quota/xfs_dquot.h | 2
133 files changed, 3131 insertions, 1972 deletions
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 51c94e26a346..e777961939f3 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
 
 	BUG_ON(!vcookie->fscache);
 
-	if (PageFsCache(page)) {
-		if (fscache_check_page_write(vcookie->fscache, page)) {
-			if (!(gfp & __GFP_WAIT))
-				return 0;
-			fscache_wait_on_page_write(vcookie->fscache, page);
-		}
-
-		fscache_uncache_page(vcookie->fscache, page);
-		ClearPageFsCache(page);
-	}
-
-	return 1;
+	return fscache_maybe_release_page(vcookie->fscache, page, gfp);
 }
 
 void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
 		fscache_wait_on_page_write(vcookie->fscache, page);
 		BUG_ON(!PageLocked(page));
 		fscache_uncache_page(vcookie->fscache, page);
-		ClearPageFsCache(page);
 	}
 }
 
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 681c2a7b013f..39b301662f22 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
 		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
 		fscache_wait_on_page_write(vnode->cache, page);
 		fscache_uncache_page(vnode->cache, page);
-		ClearPageFsCache(page);
 	}
 #endif
 
@@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
 	/* deny if page is being written to the cache and the caller hasn't
 	 * elected to wait */
 #ifdef CONFIG_AFS_FSCACHE
-	if (PageFsCache(page)) {
-		if (fscache_check_page_write(vnode->cache, page)) {
-			if (!(gfp_flags & __GFP_WAIT)) {
-				_leave(" = F [cache busy]");
-				return 0;
-			}
-			fscache_wait_on_page_write(vnode->cache, page);
-		}
-
-		fscache_uncache_page(vnode->cache, page);
-		ClearPageFsCache(page);
-	}
+	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
+		_leave(" = F [cache busy]");
+		return 0;
+	}
 #endif
 
diff --git a/fs/aio.c b/fs/aio.c
index 02a2c9340573..c30dfc006108 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -15,6 +15,7 @@
 #include <linux/aio_abi.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/backing-dev.h>
 #include <linux/uio.h>
 
 #define DEBUG 0
@@ -32,6 +33,9 @@
 #include <linux/workqueue.h>
 #include <linux/security.h>
 #include <linux/eventfd.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/hash.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -60,6 +64,14 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
+#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
+#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
+struct aio_batch_entry {
+	struct hlist_node list;
+	struct address_space *mapping;
+};
+mempool_t *abe_pool;
+
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
@@ -73,6 +85,8 @@ static int __init aio_setup(void)
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	aio_wq = create_workqueue("aio");
+	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
+	BUG_ON(!abe_pool);
 
 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
@@ -1531,8 +1545,44 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
+static void aio_batch_add(struct address_space *mapping,
+			  struct hlist_head *batch_hash)
+{
+	struct aio_batch_entry *abe;
+	struct hlist_node *pos;
+	unsigned bucket;
+
+	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
+	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
+		if (abe->mapping == mapping)
+			return;
+	}
+
+	abe = mempool_alloc(abe_pool, GFP_KERNEL);
+	BUG_ON(!igrab(mapping->host));
+	abe->mapping = mapping;
+	hlist_add_head(&abe->list, &batch_hash[bucket]);
+	return;
+}
+
+static void aio_batch_free(struct hlist_head *batch_hash)
+{
+	struct aio_batch_entry *abe;
+	struct hlist_node *pos, *n;
+	int i;
+
+	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
+		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
+			blk_run_address_space(abe->mapping);
+			iput(abe->mapping->host);
+			hlist_del(&abe->list);
+			mempool_free(abe, abe_pool);
+		}
+	}
+}
+
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb)
+			 struct iocb *iocb, struct hlist_head *batch_hash)
 {
 	struct kiocb *req;
 	struct file *file;
@@ -1608,6 +1658,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		;
 	}
 	spin_unlock_irq(&ctx->ctx_lock);
+	if (req->ki_opcode == IOCB_CMD_PREAD ||
+	    req->ki_opcode == IOCB_CMD_PREADV ||
+	    req->ki_opcode == IOCB_CMD_PWRITE ||
+	    req->ki_opcode == IOCB_CMD_PWRITEV)
+		aio_batch_add(file->f_mapping, batch_hash);
+
 	aio_put_req(req);	/* drop extra ref to req */
 	return 0;
 
@@ -1635,6 +1691,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
 	struct kioctx *ctx;
 	long ret = 0;
 	int i;
+	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1666,10 +1723,11 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp);
+		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
 		if (ret)
 			break;
 	}
+	aio_batch_free(batch_hash);
 
 	put_ioctx(ctx);
 	return i ? i : ret;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index b9b3bb51b1e4..d15ea1790bfb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -767,7 +767,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
 	current->mm->start_stack = bprm->p;
 
-	/* Now we do a little grungy work by mmaping the ELF image into
+	/* Now we do a little grungy work by mmapping the ELF image into
 	   the correct location in memory. */
 	for(i = 0, elf_ppnt = elf_phdata;
 	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
diff --git a/fs/bio.c b/fs/bio.c
index 12da5db8682c..76e6713abf94 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -272,7 +272,7 @@ EXPORT_SYMBOL(bio_init);
  * for a &struct bio to become free. If a %NULL @bs is passed in, we will
  * fall back to just using @kmalloc to allocate the required memory.
  *
- * Note that the caller must set ->bi_destructor on succesful return
+ * Note that the caller must set ->bi_destructor on successful return
  * of a bio, to do the appropriate freeing of the bio once the reference
  * count drops to zero.
  **/
@@ -1393,6 +1393,18 @@ void bio_check_pages_dirty(struct bio *bio)
 	}
 }
 
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+void bio_flush_dcache_pages(struct bio *bi)
+{
+	int i;
+	struct bio_vec *bvec;
+
+	bio_for_each_segment(bvec, bi, i)
+		flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL(bio_flush_dcache_pages);
+#endif
+
 /**
  * bio_endio - end I/O on a bio
  * @bio:	bio
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 8bed0557d88c..73d6a735b8f3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -405,7 +405,17 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
 
 static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
 {
-	return sync_blockdev(I_BDEV(filp->f_mapping->host));
+	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+	int error;
+
+	error = sync_blockdev(bdev);
+	if (error)
+		return error;
+
+	error = blkdev_issue_flush(bdev, NULL);
+	if (error == -EOPNOTSUPP)
+		error = 0;
+	return error;
 }
 
 /*
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index ccbdcb54ec5d..46bea0f4dc7b 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -256,7 +256,7 @@ out:
  * Insert @em into @tree or perform a simple forward/backward merge with
  * existing mappings.  The extent_map struct passed in will be inserted
  * into the tree directly, with an additional reference taken, or a
- * reference dropped if the merge attempt was sucessfull.
+ * reference dropped if the merge attempt was successfull.
  */
 int add_extent_mapping(struct extent_map_tree *tree,
 		       struct extent_map *em)
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 431accd475a7..27089311fbea 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -114,8 +114,9 @@ nomem_lookup_data:
 
 /*
  * attempt to look up the nominated node in this cache
+ * - return -ETIMEDOUT to be scheduled again
  */
-static void cachefiles_lookup_object(struct fscache_object *_object)
+static int cachefiles_lookup_object(struct fscache_object *_object)
 {
 	struct cachefiles_lookup_data *lookup_data;
 	struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
 	    object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
 		cachefiles_attr_changed(&object->fscache);
 
-	if (ret < 0) {
-		printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n",
-		       ret);
+	if (ret < 0 && ret != -ETIMEDOUT) {
+		if (ret != -ENOBUFS)
+			printk(KERN_WARNING
+			       "CacheFiles: Lookup failed error %d\n", ret);
 		fscache_object_lookup_error(&object->fscache);
 	}
 
 	_leave(" [%d]", ret);
+	return ret;
 }
 
 /*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
 		}
 
 		cache = object->fscache.cache;
+		fscache_object_destroy(&object->fscache);
 		kmem_cache_free(cachefiles_object_jar, object);
 		fscache_object_destroyed(cache);
 	}
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
 	if (oi_size == ni_size)
 		return 0;
 
-	newattrs.ia_size = ni_size;
-	newattrs.ia_valid = ATTR_SIZE;
-
 	cachefiles_begin_secure(cache, &saved_cred);
 	mutex_lock(&object->backer->d_inode->i_mutex);
+
+	/* if there's an extension to a partial page at the end of the backing
+	 * file, we need to discard the partial page so that we pick up new
+	 * data after it */
+	if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+		_debug("discard tail %llx", oi_size);
+		newattrs.ia_valid = ATTR_SIZE;
+		newattrs.ia_size = oi_size & PAGE_MASK;
+		ret = notify_change(object->backer, &newattrs);
+		if (ret < 0)
+			goto truncate_failed;
+	}
+
+	newattrs.ia_valid = ATTR_SIZE;
+	newattrs.ia_size = ni_size;
 	ret = notify_change(object->backer, &newattrs);
+
+truncate_failed:
 	mutex_unlock(&object->backer->d_inode->i_mutex);
 	cachefiles_end_secure(cache, saved_cred);
 
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ce818ae39ea..14ac4806e291 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -21,17 +21,81 @@
 #include <linux/security.h>
 #include "internal.h"
 
-static int cachefiles_wait_bit(void *flags)
+#define CACHEFILES_KEYBUF_SIZE 512
+
+/*
+ * dump debugging info about an object
+ */
+static noinline
+void __cachefiles_printk_object(struct cachefiles_object *object,
+			       const char *prefix,
+			       u8 *keybuf)
 {
-	schedule();
-	return 0;
+	struct fscache_cookie *cookie;
+	unsigned keylen, loop;
+
+	printk(KERN_ERR "%sobject: OBJ%x\n",
+	       prefix, object->fscache.debug_id);
+	printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
+	       prefix, fscache_object_states[object->fscache.state],
+	       object->fscache.flags, object->fscache.work.flags,
+	       object->fscache.events,
+	       object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
+	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
+	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
+	       object->fscache.n_exclusive);
+	printk(KERN_ERR "%sparent=%p\n",
+	       prefix, object->fscache.parent);
+
+	spin_lock(&object->fscache.lock);
+	cookie = object->fscache.cookie;
+	if (cookie) {
+		printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
+		       prefix,
+		       object->fscache.cookie,
+		       object->fscache.cookie->parent,
+		       object->fscache.cookie->netfs_data,
+		       object->fscache.cookie->flags);
+		if (keybuf)
+			keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
+						      CACHEFILES_KEYBUF_SIZE);
+		else
+			keylen = 0;
+	} else {
+		printk(KERN_ERR "%scookie=NULL\n", prefix);
+		keylen = 0;
+	}
+	spin_unlock(&object->fscache.lock);
+
+	if (keylen) {
+		printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
+		for (loop = 0; loop < keylen; loop++)
+			printk("%02x", keybuf[loop]);
+		printk("'\n");
+	}
+}
+
+/*
+ * dump debugging info about a pair of objects
+ */
+static noinline void cachefiles_printk_object(struct cachefiles_object *object,
+					      struct cachefiles_object *xobject)
+{
+	u8 *keybuf;
+
+	keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
+	if (object)
+		__cachefiles_printk_object(object, "", keybuf);
+	if (xobject)
+		__cachefiles_printk_object(xobject, "x", keybuf);
+	kfree(keybuf);
 }
 
 /*
  * record the fact that an object is now active
  */
-static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
+static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
 					  struct cachefiles_object *object)
 {
 	struct cachefiles_object *xobject;
 	struct rb_node **_p, *_parent = NULL;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
 try_again:
 	write_lock(&cache->active_lock);
 
-	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
+	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
+		printk(KERN_ERR "CacheFiles: Error: Object already active\n");
+		cachefiles_printk_object(object, NULL);
 		BUG();
+	}
 
 	dentry = object->dentry;
 	_p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ try_again:
 	rb_insert_color(&object->active_node, &cache->active_nodes);
 
 	write_unlock(&cache->active_lock);
-	_leave("");
-	return;
+	_leave(" = 0");
+	return 0;
 
 	/* an old object from a previous incarnation is hogging the slot - we
 	 * need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ wait_for_old_object:
 		printk(KERN_ERR "\n");
 		printk(KERN_ERR "CacheFiles: Error:"
 		       " Unexpected object collision\n");
-		printk(KERN_ERR "xobject: OBJ%x\n",
-		       xobject->fscache.debug_id);
-		printk(KERN_ERR "xobjstate=%s\n",
-		       fscache_object_states[xobject->fscache.state]);
-		printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
-		printk(KERN_ERR "xobjevent=%lx [%lx]\n",
-		       xobject->fscache.events, xobject->fscache.event_mask);
-		printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
-		       xobject->fscache.n_ops, xobject->fscache.n_in_progress,
-		       xobject->fscache.n_exclusive);
-		printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
-		       xobject->fscache.cookie,
-		       xobject->fscache.cookie->parent,
-		       xobject->fscache.cookie->netfs_data,
-		       xobject->fscache.cookie->flags);
-		printk(KERN_ERR "xparent=%p\n",
-		       xobject->fscache.parent);
-		printk(KERN_ERR "object: OBJ%x\n",
-		       object->fscache.debug_id);
-		printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
-		       object->fscache.cookie,
-		       object->fscache.cookie->parent,
-		       object->fscache.cookie->netfs_data,
-		       object->fscache.cookie->flags);
-		printk(KERN_ERR "parent=%p\n",
-		       object->fscache.parent);
+		cachefiles_printk_object(object, xobject);
 		BUG();
 	}
 	atomic_inc(&xobject->usage);
 	write_unlock(&cache->active_lock);
 
-	_debug(">>> wait");
-	wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE,
-		    cachefiles_wait_bit, TASK_UNINTERRUPTIBLE);
-	_debug("<<< waited");
+	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+		wait_queue_head_t *wq;
+
+		signed long timeout = 60 * HZ;
+		wait_queue_t wait;
+		bool requeue;
+
+		/* if the object we're waiting for is queued for processing,
+		 * then just put ourselves on the queue behind it */
+		if (slow_work_is_queued(&xobject->fscache.work)) {
+			_debug("queue OBJ%x behind OBJ%x immediately",
+			       object->fscache.debug_id,
+			       xobject->fscache.debug_id);
+			goto requeue;
+		}
+
+		/* otherwise we sleep until either the object we're waiting for
+		 * is done, or the slow-work facility wants the thread back to
+		 * do other work */
+		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
+		init_wait(&wait);
+		requeue = false;
+		do {
+			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
+				break;
+			requeue = slow_work_sleep_till_thread_needed(
+				&object->fscache.work, &timeout);
+		} while (timeout > 0 && !requeue);
+		finish_wait(wq, &wait);
+
+		if (requeue &&
+		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+			_debug("queue OBJ%x behind OBJ%x after wait",
+			       object->fscache.debug_id,
+			       xobject->fscache.debug_id);
+			goto requeue;
+		}
+
+		if (timeout <= 0) {
+			printk(KERN_ERR "\n");
+			printk(KERN_ERR "CacheFiles: Error: Overlong"
+			       " wait for old active object to go away\n");
+			cachefiles_printk_object(object, xobject);
+			goto requeue;
+		}
+	}
+
+	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
 
 	cache->cache.ops->put_object(&xobject->fscache);
 	goto try_again;
+
+requeue:
+	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
+	cache->cache.ops->put_object(&xobject->fscache);
+	_leave(" = -ETIMEDOUT");
+	return -ETIMEDOUT;
 }
 
 /*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
 
 	dir = dget_parent(object->dentry);
 
-	mutex_lock(&dir->d_inode->i_mutex);
+	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
 	ret = cachefiles_bury_object(cache, dir, object->dentry);
 
 	dput(dir);
@@ -307,7 +400,7 @@ lookup_again:
 	/* search the current directory for the element name */
 	_debug("lookup '%s'", name);
 
-	mutex_lock(&dir->d_inode->i_mutex);
+	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
 
 	start = jiffies;
 	next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ lookup_again:
 	}
 
 	/* note that we're now using this object */
-	cachefiles_mark_object_active(cache, object);
+	ret = cachefiles_mark_object_active(cache, object);
 
 	mutex_unlock(&dir->d_inode->i_mutex);
 	dput(dir);
 	dir = NULL;
 
+	if (ret == -ETIMEDOUT)
+		goto mark_active_timed_out;
+
 	_debug("=== OBTAINED_OBJECT ===");
 
 	if (object->new) {
@@ -467,6 +563,10 @@ create_error:
 	cachefiles_io_error(cache, "Create/mkdir failed");
 	goto error;
 
+mark_active_timed_out:
+	_debug("mark active timed out");
+	goto release_dentry;
+
 check_error:
 	_debug("check error %d", ret);
 	write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ check_error:
 	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
 	write_unlock(&cache->active_lock);
-
+release_dentry:
 	dput(object->dentry);
 	object->dentry = NULL;
 	goto error_out;
@@ -495,9 +595,6 @@ error:
 error_out2:
 	dput(dir);
 error_out:
-	if (ret == -ENOSPC)
-		ret = -ENOBUFS;
-
 	_leave(" = error %d", -ret);
 	return ret;
 }
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index a69787e7dd96..a6c8c6fe8df9 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -11,6 +11,7 @@
 
 #include <linux/mount.h>
 #include <linux/file.h>
+#include <linux/ima.h>
 #include "internal.h"
 
 /*
@@ -40,8 +41,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
 
 	_debug("--- monitor %p %lx ---", page, page->flags);
 
-	if (!PageUptodate(page) && !PageError(page))
-		dump_stack();
+	if (!PageUptodate(page) && !PageError(page)) {
+		/* unlocked, not uptodate and not erronous? */
+		_debug("page probably truncated");
+	}
 
 	/* remove from the waitqueue */
 	list_del(&wait->task_list);
@@ -61,6 +64,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
 }
 
 /*
+ * handle a probably truncated page
+ * - check to see if the page is still relevant and reissue the read if
+ *   possible
+ * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
+ *   must wait again and 0 if successful
+ */
+static int cachefiles_read_reissue(struct cachefiles_object *object,
+				   struct cachefiles_one_read *monitor)
+{
+	struct address_space *bmapping = object->backer->d_inode->i_mapping;
+	struct page *backpage = monitor->back_page, *backpage2;
+	int ret;
+
+	kenter("{ino=%lx},{%lx,%lx}",
+	       object->backer->d_inode->i_ino,
+	       backpage->index, backpage->flags);
+
+	/* skip if the page was truncated away completely */
+	if (backpage->mapping != bmapping) {
+		kleave(" = -ENODATA [mapping]");
+		return -ENODATA;
+	}
+
+	backpage2 = find_get_page(bmapping, backpage->index);
+	if (!backpage2) {
+		kleave(" = -ENODATA [gone]");
+		return -ENODATA;
+	}
+
+	if (backpage != backpage2) {
+		put_page(backpage2);
+		kleave(" = -ENODATA [different]");
+		return -ENODATA;
+	}
+
+	/* the page is still there and we already have a ref on it, so we don't
+	 * need a second */
+	put_page(backpage2);
+
+	INIT_LIST_HEAD(&monitor->op_link);
+	add_page_wait_queue(backpage, &monitor->monitor);
+
+	if (trylock_page(backpage)) {
+		ret = -EIO;
+		if (PageError(backpage))
+			goto unlock_discard;
+		ret = 0;
+		if (PageUptodate(backpage))
+			goto unlock_discard;
+
+		kdebug("reissue read");
+		ret = bmapping->a_ops->readpage(NULL, backpage);
+		if (ret < 0)
+			goto unlock_discard;
+	}
+
+	/* but the page may have been read before the monitor was installed, so
+	 * the monitor may miss the event - so we have to ensure that we do get
+	 * one in such a case */
+	if (trylock_page(backpage)) {
+		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
+		unlock_page(backpage);
+	}
+
+	/* it'll reappear on the todo list */
+	kleave(" = -EINPROGRESS");
+	return -EINPROGRESS;
+
+unlock_discard:
+	unlock_page(backpage);
+	spin_lock_irq(&object->work_lock);
+	list_del(&monitor->op_link);
+	spin_unlock_irq(&object->work_lock);
+	kleave(" = %d", ret);
+	return ret;
+}
+
+/*
  * copy data from backing pages to netfs pages to complete a read operation
  * - driven by FS-Cache's thread pool
  */
@@ -92,20 +173,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 
 		_debug("- copy {%lu}", monitor->back_page->index);
 
-		error = -EIO;
+	recheck:
 		if (PageUptodate(monitor->back_page)) {
 			copy_highpage(monitor->netfs_page, monitor->back_page);
 
 			pagevec_add(&pagevec, monitor->netfs_page);
 			fscache_mark_pages_cached(monitor->op, &pagevec);
 			error = 0;
-		}
-
-		if (error)
+		} else if (!PageError(monitor->back_page)) {
+			/* the page has probably been truncated */
+			error = cachefiles_read_reissue(object, monitor);
+			if (error == -EINPROGRESS)
+				goto next;
+			goto recheck;
+		} else {
 			cachefiles_io_error_obj(
 				object,
 				"Readpage failed on backing file %lx",
 				(unsigned long) monitor->back_page->flags);
+			error = -EIO;
+		}
 
 		page_cache_release(monitor->back_page);
 
@@ -114,6 +201,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 		fscache_put_retrieval(op);
 		kfree(monitor);
 
+	next:
 		/* let the thread pool have some air occasionally */
 		max--;
 		if (max < 0 || need_resched()) {
@@ -333,7 +421,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 
 	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
 
-	op->op.flags = FSCACHE_OP_FAST;
+	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+	op->op.flags |= FSCACHE_OP_FAST;
 	op->op.processor = cachefiles_read_copier;
 
 	pagevec_init(&pagevec, 0);
@@ -639,7 +728,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
 	pagevec_init(&pagevec, 0);
 
-	op->op.flags = FSCACHE_OP_FAST;
+	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+	op->op.flags |= FSCACHE_OP_FAST;
 	op->op.processor = cachefiles_read_copier;
 
 	INIT_LIST_HEAD(&backpages);
@@ -801,7 +891,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
 	struct cachefiles_cache *cache;
 	mm_segment_t old_fs;
 	struct file *file;
-	loff_t pos;
+	loff_t pos, eof;
+	size_t len;
 	void *data;
 	int ret;
 
@@ -832,18 +923,33 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
 	if (IS_ERR(file)) {
 		ret = PTR_ERR(file);
 	} else {
+		ima_counts_get(file);
 		ret = -EIO;
 		if (file->f_op->write) {
 			pos = (loff_t) page->index << PAGE_SHIFT;
+
+			/* we mustn't write more data than we have, so we have
+			 * to beware of a partial page at EOF */
+			eof = object->fscache.store_limit_l;
+			len = PAGE_SIZE;
+			if (eof & ~PAGE_MASK) {
+				ASSERTCMP(pos, <, eof);
+				if (eof - pos < PAGE_SIZE) {
+					_debug("cut short %llx to %llx",
+					       pos, eof);
+					len = eof - pos;
+					ASSERTCMP(pos + len, ==, eof);
+				}
+			}
+
 			data = kmap(page);
 			old_fs = get_fs();
 			set_fs(KERNEL_DS);
 			ret = file->f_op->write(
-				file, (const void __user *) data, PAGE_SIZE,
-				&pos);
+				file, (const void __user *) data, len, &pos);
 			set_fs(old_fs);
 			kunmap(page);
-			if (ret != PAGE_SIZE)
+			if (ret != len)
 				ret = -EIO;
 		}
 		fput(file);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 145540a316ab..094ea65afc85 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,12 @@
+Version 1.61
+------------
+Fix append problem to Samba servers (files opened with O_APPEND could
+have duplicated data). Fix oops in cifs_lookup. Workaround problem
+mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
+Disable use of server inode numbers when server only
+partially supports them (e.g. for one server querying inode numbers on
+FindFirst fails but QPathInfo queries works).
+
 Version 1.60
 -------------
 Fix memory leak in reconnect. Fix oops in DFS mount error path.
diff --git a/fs/cifs/README b/fs/cifs/README
index 79c1a93400be..a727b7cb075f 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -423,7 +423,7 @@ A partial list of the supported mount options follows:
 		source name to use to represent the client netbios machine
 		name when doing the RFC1001 netbios session initialize.
  direct		Do not do inode data caching on files opened on this mount.
-		This precludes mmaping files on this mount. In some cases
+		This precludes mmapping files on this mount. In some cases
 		with fast networks and little or no caching benefits on the
 		client (e.g. when the application is doing large sequential
 		reads bigger than page size without rereading the same data)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9a5e4f5f3122..29f1da761bbf 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1037,7 +1037,7 @@ init_cifs(void)
 	if (rc)
 		goto out_unregister_key_type;
 #endif
-	rc = slow_work_register_user();
+	rc = slow_work_register_user(THIS_MODULE);
 	if (rc)
 		goto out_unregister_resolver_key;
 
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 5d0fde18039c..4b35f7ec0583 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -39,7 +39,7 @@
 
 /*
  * MAX_REQ is the maximum number of requests that WE will send
- * on one socket concurently. It also matches the most common
+ * on one socket concurrently. It also matches the most common
  * value of max multiplex returned by servers. We may
  * eventually want to use the negotiated value (in case
  * future servers can handle more) when we are more confident that
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 2d07f890a842..3877737f96a6 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1227,7 +1227,7 @@ typedef struct smb_com_setattr_rsp {
 /* empty wct response to setattr */
 
 /*******************************************************/
-/* NT Transact structure defintions follow             */
+/* NT Transact structure definitions follow            */
 /* Currently only ioctl, acl (get security descriptor) */
 /* and notify are implemented                           */
 /*******************************************************/
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 627a60a6c1b1..1f42f772865a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -214,8 +214,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
 		posix_flags |= SMB_O_EXCL;
 	if (oflags & O_TRUNC)
 		posix_flags |= SMB_O_TRUNC;
-	if (oflags & O_APPEND)
-		posix_flags |= SMB_O_APPEND;
 	if (oflags & O_SYNC)
 		posix_flags |= SMB_O_SYNC;
 	if (oflags & O_DIRECTORY)
@@ -643,9 +641,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 	 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
 	 * the VFS handle the create.
 	 */
-	if (nd->flags & LOOKUP_EXCL) {
+	if (nd && (nd->flags & LOOKUP_EXCL)) {
 		d_instantiate(direntry, NULL);
-		return 0;
+		return NULL;
 	}
 
 	/* can not grab the rename sem here since it would
@@ -675,7 +673,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 	 * reduction in network traffic in the other paths.
 	 */
 	if (pTcon->unix_ext) {
-		if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
+		if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
 		    (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
 		    (nd->intent.open.flags & O_CREAT)) {
 			rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cababd8a52df..cf18ee765590 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -914,8 +914,8 @@ undo_setattr:
 /*
  * If dentry->d_inode is null (usually meaning the cached dentry
  * is a negative dentry) then we would attempt a standard SMB delete, but
- * if that fails we can not attempt the fall back mechanisms on EACESS
- * but will return the EACESS to the caller. Note that the VFS does not call
+ * if that fails we can not attempt the fall back mechanisms on EACCESS
+ * but will return the EACCESS to the caller. Note that the VFS does not call
  * unlink on negative dentries currently.
  */
 int cifs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index 224a1f478966..b6b6dcb500bf 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -371,7 +371,7 @@ E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
 	smbhash(p24 + 16, c8, p21 + 14, 1);
 }
 
-#if 0 /* currently unsued */
+#if 0 /* currently unused */
 static void
 D_P16(unsigned char *p14, unsigned char *in, unsigned char *out)
 {
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index 43c96ce29614..c6405ce3c50e 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -17,28 +17,25 @@ static struct ctl_table_header *fs_table_header;
 
 static ctl_table coda_table[] = {
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "timeout",
 		.data		= &coda_timeout,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec
+		.proc_handler	= proc_dointvec
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "hard",
 		.data		= &coda_hard,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec
+		.proc_handler	= proc_dointvec
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "fake_statfs",
 		.data		= &coda_fake_statfs,
 		.maxlen		= sizeof(int),
 		.mode		= 0600,
-		.proc_handler	= &proc_dointvec
+		.proc_handler	= proc_dointvec
 	},
 	{}
 };
@@ -46,7 +43,6 @@ static ctl_table coda_table[] = {
 #ifdef CONFIG_SYSCTL
 static ctl_table fs_table[] = {
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "coda",
 		.mode		= 0555,
 		.child		= coda_table
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index d84e7058c298..2346895b3a77 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -246,428 +246,6 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
246 return err; 246 return err;
247} 247}
248 248
249#ifdef CONFIG_NET
250static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg)
251{
252 struct compat_timeval __user *up = compat_ptr(arg);
253 struct timeval ktv;
254 mm_segment_t old_fs = get_fs();
255 int err;
256
257 set_fs(KERNEL_DS);
258 err = sys_ioctl(fd, cmd, (unsigned long)&ktv);
259 set_fs(old_fs);
260 if(!err) {
261 err = put_user(ktv.tv_sec, &up->tv_sec);
262 err |= __put_user(ktv.tv_usec, &up->tv_usec);
263 }
264 return err;
265}
266
267static int do_siocgstampns(unsigned int fd, unsigned int cmd, unsigned long arg)
268{
269 struct compat_timespec __user *up = compat_ptr(arg);
270 struct timespec kts;
271 mm_segment_t old_fs = get_fs();
272 int err;
273
274 set_fs(KERNEL_DS);
275 err = sys_ioctl(fd, cmd, (unsigned long)&kts);
276 set_fs(old_fs);
277 if (!err) {
278 err = put_user(kts.tv_sec, &up->tv_sec);
279 err |= __put_user(kts.tv_nsec, &up->tv_nsec);
280 }
281 return err;
282}
283
284struct ifmap32 {
285 compat_ulong_t mem_start;
286 compat_ulong_t mem_end;
287 unsigned short base_addr;
288 unsigned char irq;
289 unsigned char dma;
290 unsigned char port;
291};
292
293struct ifreq32 {
294#define IFHWADDRLEN 6
295#define IFNAMSIZ 16
296 union {
297 char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
298 } ifr_ifrn;
299 union {
300 struct sockaddr ifru_addr;
301 struct sockaddr ifru_dstaddr;
302 struct sockaddr ifru_broadaddr;
303 struct sockaddr ifru_netmask;
304 struct sockaddr ifru_hwaddr;
305 short ifru_flags;
306 compat_int_t ifru_ivalue;
307 compat_int_t ifru_mtu;
308 struct ifmap32 ifru_map;
309 char ifru_slave[IFNAMSIZ]; /* Just fits the size */
310 char ifru_newname[IFNAMSIZ];
311 compat_caddr_t ifru_data;
312 /* XXXX? ifru_settings should be here */
313 } ifr_ifru;
314};
315
316struct ifconf32 {
317 compat_int_t ifc_len; /* size of buffer */
318 compat_caddr_t ifcbuf;
319};
320
321static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg)
322{
323 struct ifreq __user *uifr;
324 int err;
325
326 uifr = compat_alloc_user_space(sizeof(struct ifreq));
327 if (copy_in_user(uifr, compat_ptr(arg), sizeof(struct ifreq32)))
328 return -EFAULT;
329
330 err = sys_ioctl(fd, SIOCGIFNAME, (unsigned long)uifr);
331 if (err)
332 return err;
333
334 if (copy_in_user(compat_ptr(arg), uifr, sizeof(struct ifreq32)))
335 return -EFAULT;
336
337 return 0;
338}
339
340static int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg)
341{
342 struct ifconf32 ifc32;
343 struct ifconf ifc;
344 struct ifconf __user *uifc;
345 struct ifreq32 __user *ifr32;
346 struct ifreq __user *ifr;
347 unsigned int i, j;
348 int err;
349
350 if (copy_from_user(&ifc32, compat_ptr(arg), sizeof(struct ifconf32)))
351 return -EFAULT;
352
353 if (ifc32.ifcbuf == 0) {
354 ifc32.ifc_len = 0;
355 ifc.ifc_len = 0;
356 ifc.ifc_req = NULL;
357 uifc = compat_alloc_user_space(sizeof(struct ifconf));
358 } else {
359 size_t len =((ifc32.ifc_len / sizeof (struct ifreq32)) + 1) *
360 sizeof (struct ifreq);
361 uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
362 ifc.ifc_len = len;
363 ifr = ifc.ifc_req = (void __user *)(uifc + 1);
364 ifr32 = compat_ptr(ifc32.ifcbuf);
365 for (i = 0; i < ifc32.ifc_len; i += sizeof (struct ifreq32)) {
366 if (copy_in_user(ifr, ifr32, sizeof(struct ifreq32)))
367 return -EFAULT;
368 ifr++;
369 ifr32++;
370 }
371 }
372 if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
373 return -EFAULT;
374
375 err = sys_ioctl (fd, SIOCGIFCONF, (unsigned long)uifc);
376 if (err)
377 return err;
378
379 if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
380 return -EFAULT;
381
382 ifr = ifc.ifc_req;
383 ifr32 = compat_ptr(ifc32.ifcbuf);
384 for (i = 0, j = 0;
385 i + sizeof (struct ifreq32) <= ifc32.ifc_len && j < ifc.ifc_len;
386 i += sizeof (struct ifreq32), j += sizeof (struct ifreq)) {
387 if (copy_in_user(ifr32, ifr, sizeof (struct ifreq32)))
388 return -EFAULT;
389 ifr32++;
390 ifr++;
391 }
392
393 if (ifc32.ifcbuf == 0) {
394 /* Translate from 64-bit structure multiple to
395 * a 32-bit one.
396 */
397 i = ifc.ifc_len;
398 i = ((i / sizeof(struct ifreq)) * sizeof(struct ifreq32));
399 ifc32.ifc_len = i;
400 } else {
401 ifc32.ifc_len = i;
402 }
403 if (copy_to_user(compat_ptr(arg), &ifc32, sizeof(struct ifconf32)))
404 return -EFAULT;
405
406 return 0;
407}
408
409static int ethtool_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
410{
411 struct ifreq __user *ifr;
412 struct ifreq32 __user *ifr32;
413 u32 data;
414 void __user *datap;
415
416 ifr = compat_alloc_user_space(sizeof(*ifr));
417 ifr32 = compat_ptr(arg);
418
419 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
420 return -EFAULT;
421
422 if (get_user(data, &ifr32->ifr_ifru.ifru_data))
423 return -EFAULT;
424
425 datap = compat_ptr(data);
426 if (put_user(datap, &ifr->ifr_ifru.ifru_data))
427 return -EFAULT;
428
429 return sys_ioctl(fd, cmd, (unsigned long) ifr);
430}
431
432static int bond_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
433{
434 struct ifreq kifr;
435 struct ifreq __user *uifr;
436 struct ifreq32 __user *ifr32 = compat_ptr(arg);
437 mm_segment_t old_fs;
438 int err;
439 u32 data;
440 void __user *datap;
441
442 switch (cmd) {
443 case SIOCBONDENSLAVE:
444 case SIOCBONDRELEASE:
445 case SIOCBONDSETHWADDR:
446 case SIOCBONDCHANGEACTIVE:
447 if (copy_from_user(&kifr, ifr32, sizeof(struct ifreq32)))
448 return -EFAULT;
449
450 old_fs = get_fs();
451 set_fs (KERNEL_DS);
452 err = sys_ioctl (fd, cmd, (unsigned long)&kifr);
453 set_fs (old_fs);
454
455 return err;
456 case SIOCBONDSLAVEINFOQUERY:
457 case SIOCBONDINFOQUERY:
458 uifr = compat_alloc_user_space(sizeof(*uifr));
459 if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
460 return -EFAULT;
461
462 if (get_user(data, &ifr32->ifr_ifru.ifru_data))
463 return -EFAULT;
464
465 datap = compat_ptr(data);
466 if (put_user(datap, &uifr->ifr_ifru.ifru_data))
467 return -EFAULT;
468
469 return sys_ioctl (fd, cmd, (unsigned long)uifr);
470 default:
471 return -EINVAL;
472 };
473}
474
475static int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
476{
477 struct ifreq __user *u_ifreq64;
478 struct ifreq32 __user *u_ifreq32 = compat_ptr(arg);
479 char tmp_buf[IFNAMSIZ];
480 void __user *data64;
481 u32 data32;
482
483 if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
484 IFNAMSIZ))
485 return -EFAULT;
486 if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
487 return -EFAULT;
488 data64 = compat_ptr(data32);
489
490 u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
491
492 /* Don't check these user accesses, just let that get trapped
493 * in the ioctl handler instead.
494 */
495 if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
496 IFNAMSIZ))
497 return -EFAULT;
498 if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
499 return -EFAULT;
500
501 return sys_ioctl(fd, cmd, (unsigned long) u_ifreq64);
502}
503
504static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
505{
506 struct ifreq ifr;
507 struct ifreq32 __user *uifr32;
508 struct ifmap32 __user *uifmap32;
509 mm_segment_t old_fs;
510 int err;
511
512 uifr32 = compat_ptr(arg);
513 uifmap32 = &uifr32->ifr_ifru.ifru_map;
514 switch (cmd) {
515 case SIOCSIFMAP:
516 err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
517 err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
518 err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
519 err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
520 err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
521 err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
522 err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
523 if (err)
524 return -EFAULT;
525 break;
526 case SIOCSHWTSTAMP:
527 if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
528 return -EFAULT;
529 ifr.ifr_data = compat_ptr(uifr32->ifr_ifru.ifru_data);
530 break;
531 default:
532 if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
533 return -EFAULT;
534 break;
535 }
536 old_fs = get_fs();
537 set_fs (KERNEL_DS);
538 err = sys_ioctl (fd, cmd, (unsigned long)&ifr);
539 set_fs (old_fs);
540 if (!err) {
541 switch (cmd) {
542 /* TUNSETIFF is defined as _IOW, it should be _IORW
543 * as the data is copied back to user space, but that
544 * cannot be fixed without breaking all existing apps.
545 */
546 case TUNSETIFF:
547 case TUNGETIFF:
548 case SIOCGIFFLAGS:
549 case SIOCGIFMETRIC:
550 case SIOCGIFMTU:
551 case SIOCGIFMEM:
552 case SIOCGIFHWADDR:
553 case SIOCGIFINDEX:
554 case SIOCGIFADDR:
555 case SIOCGIFBRDADDR:
556 case SIOCGIFDSTADDR:
557 case SIOCGIFNETMASK:
558 case SIOCGIFTXQLEN:
559 if (copy_to_user(uifr32, &ifr, sizeof(*uifr32)))
560 return -EFAULT;
561 break;
562 case SIOCGIFMAP:
563 err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
564 err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
565 err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
566 err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
567 err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
568 err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
569 err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
570 if (err)
571 err = -EFAULT;
572 break;
573 }
574 }
575 return err;
576}
577
578struct rtentry32 {
579 u32 rt_pad1;
580 struct sockaddr rt_dst; /* target address */
581 struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
582 struct sockaddr rt_genmask; /* target network mask (IP) */
583 unsigned short rt_flags;
584 short rt_pad2;
585 u32 rt_pad3;
586 unsigned char rt_tos;
587 unsigned char rt_class;
588 short rt_pad4;
589 short rt_metric; /* +1 for binary compatibility! */
590 /* char * */ u32 rt_dev; /* forcing the device at add */
591 u32 rt_mtu; /* per route MTU/Window */
592 u32 rt_window; /* Window clamping */
593 unsigned short rt_irtt; /* Initial RTT */
594
595};
596
597struct in6_rtmsg32 {
598 struct in6_addr rtmsg_dst;
599 struct in6_addr rtmsg_src;
600 struct in6_addr rtmsg_gateway;
601 u32 rtmsg_type;
602 u16 rtmsg_dst_len;
603 u16 rtmsg_src_len;
604 u32 rtmsg_metric;
605 u32 rtmsg_info;
606 u32 rtmsg_flags;
607 s32 rtmsg_ifindex;
608};
609
610static int routing_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
611{
612 int ret;
613 void *r = NULL;
614 struct in6_rtmsg r6;
615 struct rtentry r4;
616 char devname[16];
617 u32 rtdev;
618 mm_segment_t old_fs = get_fs();
619
620 struct socket *mysock = sockfd_lookup(fd, &ret);
621
622 if (mysock && mysock->sk && mysock->sk->sk_family == AF_INET6) { /* ipv6 */
623 struct in6_rtmsg32 __user *ur6 = compat_ptr(arg);
624 ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst),
625 3 * sizeof(struct in6_addr));
626 ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type));
627 ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
628 ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
629 ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric));
630 ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info));
631 ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags));
632 ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
633
634 r = (void *) &r6;
635 } else { /* ipv4 */
636 struct rtentry32 __user *ur4 = compat_ptr(arg);
637 ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst),
638 3 * sizeof(struct sockaddr));
639 ret |= __get_user (r4.rt_flags, &(ur4->rt_flags));
640 ret |= __get_user (r4.rt_metric, &(ur4->rt_metric));
641 ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu));
642 ret |= __get_user (r4.rt_window, &(ur4->rt_window));
643 ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt));
644 ret |= __get_user (rtdev, &(ur4->rt_dev));
645 if (rtdev) {
646 ret |= copy_from_user (devname, compat_ptr(rtdev), 15);
647 r4.rt_dev = devname; devname[15] = 0;
648 } else
649 r4.rt_dev = NULL;
650
651 r = (void *) &r4;
652 }
653
654 if (ret) {
655 ret = -EFAULT;
656 goto out;
657 }
658
659 set_fs (KERNEL_DS);
660 ret = sys_ioctl (fd, cmd, (unsigned long) r);
661 set_fs (old_fs);
662
663out:
664 if (mysock)
665 sockfd_put(mysock);
666
667 return ret;
668}
669#endif
670
671#ifdef CONFIG_BLOCK
672typedef struct sg_io_hdr32 {
673 compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
@@ -1212,170 +790,6 @@ static int do_smb_getmountuid(unsigned int fd, unsigned int cmd, unsigned long a
1212 return err;
1213}
1214
1215struct atmif_sioc32 {
1216 compat_int_t number;
1217 compat_int_t length;
1218 compat_caddr_t arg;
1219};
1220
1221struct atm_iobuf32 {
1222 compat_int_t length;
1223 compat_caddr_t buffer;
1224};
1225
1226#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct atmif_sioc32)
1227#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct atm_iobuf32)
1228#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct atmif_sioc32)
1229#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct atmif_sioc32)
1230#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct atmif_sioc32)
1231#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct atmif_sioc32)
1232#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct atmif_sioc32)
1233#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct atmif_sioc32)
1234#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct atmif_sioc32)
1235#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct atmif_sioc32)
1236#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct atmif_sioc32)
1237#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct atmif_sioc32)
1238#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct atmif_sioc32)
1239#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct atmif_sioc32)
1240#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct atmif_sioc32)
1241#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct atmif_sioc32)
1242#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct atmif_sioc32)
1243
1244static struct {
1245 unsigned int cmd32;
1246 unsigned int cmd;
1247} atm_ioctl_map[] = {
1248 { ATM_GETLINKRATE32, ATM_GETLINKRATE },
1249 { ATM_GETNAMES32, ATM_GETNAMES },
1250 { ATM_GETTYPE32, ATM_GETTYPE },
1251 { ATM_GETESI32, ATM_GETESI },
1252 { ATM_GETADDR32, ATM_GETADDR },
1253 { ATM_RSTADDR32, ATM_RSTADDR },
1254 { ATM_ADDADDR32, ATM_ADDADDR },
1255 { ATM_DELADDR32, ATM_DELADDR },
1256 { ATM_GETCIRANGE32, ATM_GETCIRANGE },
1257 { ATM_SETCIRANGE32, ATM_SETCIRANGE },
1258 { ATM_SETESI32, ATM_SETESI },
1259 { ATM_SETESIF32, ATM_SETESIF },
1260 { ATM_GETSTAT32, ATM_GETSTAT },
1261 { ATM_GETSTATZ32, ATM_GETSTATZ },
1262 { ATM_GETLOOP32, ATM_GETLOOP },
1263 { ATM_SETLOOP32, ATM_SETLOOP },
1264 { ATM_QUERYLOOP32, ATM_QUERYLOOP }
1265};
1266
1267#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
1268
1269static int do_atm_iobuf(unsigned int fd, unsigned int cmd, unsigned long arg)
1270{
1271 struct atm_iobuf __user *iobuf;
1272 struct atm_iobuf32 __user *iobuf32;
1273 u32 data;
1274 void __user *datap;
1275 int len, err;
1276
1277 iobuf = compat_alloc_user_space(sizeof(*iobuf));
1278 iobuf32 = compat_ptr(arg);
1279
1280 if (get_user(len, &iobuf32->length) ||
1281 get_user(data, &iobuf32->buffer))
1282 return -EFAULT;
1283 datap = compat_ptr(data);
1284 if (put_user(len, &iobuf->length) ||
1285 put_user(datap, &iobuf->buffer))
1286 return -EFAULT;
1287
1288 err = sys_ioctl(fd, cmd, (unsigned long)iobuf);
1289
1290 if (!err) {
1291 if (copy_in_user(&iobuf32->length, &iobuf->length,
1292 sizeof(int)))
1293 err = -EFAULT;
1294 }
1295
1296 return err;
1297}
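do_atm_iobuf() above demonstrates the second standard compat technique, which avoids set_fs() altogether: compat_alloc_user_space() carves a native-layout struct out of the user stack, the fields are rewritten there with get_user()/put_user(), and sys_ioctl() then runs on a genuine user pointer. The moving parts in isolation (same identifiers as in the function above):

	iobuf = compat_alloc_user_space(sizeof(*iobuf));	/* native struct on the user stack */
	iobuf32 = compat_ptr(arg);				/* the 32-bit caller's struct */

	if (get_user(len, &iobuf32->length) ||
	    get_user(data, &iobuf32->buffer))
		return -EFAULT;
	if (put_user(len, &iobuf->length) ||
	    put_user(compat_ptr(data), &iobuf->buffer))
		return -EFAULT;
	err = sys_ioctl(fd, cmd, (unsigned long)iobuf);		/* no address-limit games needed */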
1298
1299static int do_atmif_sioc(unsigned int fd, unsigned int cmd, unsigned long arg)
1300{
1301 struct atmif_sioc __user *sioc;
1302 struct atmif_sioc32 __user *sioc32;
1303 u32 data;
1304 void __user *datap;
1305 int err;
1306
1307 sioc = compat_alloc_user_space(sizeof(*sioc));
1308 sioc32 = compat_ptr(arg);
1309
1310 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
1311 get_user(data, &sioc32->arg))
1312 return -EFAULT;
1313 datap = compat_ptr(data);
1314 if (put_user(datap, &sioc->arg))
1315 return -EFAULT;
1316
1317 err = sys_ioctl(fd, cmd, (unsigned long) sioc);
1318
1319 if (!err) {
1320 if (copy_in_user(&sioc32->length, &sioc->length,
1321 sizeof(int)))
1322 err = -EFAULT;
1323 }
1324 return err;
1325}
1326
1327static int do_atm_ioctl(unsigned int fd, unsigned int cmd32, unsigned long arg)
1328{
1329 int i;
1330 unsigned int cmd = 0;
1331
1332 switch (cmd32) {
1333 case SONET_GETSTAT:
1334 case SONET_GETSTATZ:
1335 case SONET_GETDIAG:
1336 case SONET_SETDIAG:
1337 case SONET_CLRDIAG:
1338 case SONET_SETFRAMING:
1339 case SONET_GETFRAMING:
1340 case SONET_GETFRSENSE:
1341 return do_atmif_sioc(fd, cmd32, arg);
1342 }
1343
1344 for (i = 0; i < NR_ATM_IOCTL; i++) {
1345 if (cmd32 == atm_ioctl_map[i].cmd32) {
1346 cmd = atm_ioctl_map[i].cmd;
1347 break;
1348 }
1349 }
1350 if (i == NR_ATM_IOCTL)
1351 return -EINVAL;
1352
1353 switch (cmd) {
1354 case ATM_GETNAMES:
1355 return do_atm_iobuf(fd, cmd, arg);
1356
1357 case ATM_GETLINKRATE:
1358 case ATM_GETTYPE:
1359 case ATM_GETESI:
1360 case ATM_GETADDR:
1361 case ATM_RSTADDR:
1362 case ATM_ADDADDR:
1363 case ATM_DELADDR:
1364 case ATM_GETCIRANGE:
1365 case ATM_SETCIRANGE:
1366 case ATM_SETESI:
1367 case ATM_SETESIF:
1368 case ATM_GETSTAT:
1369 case ATM_GETSTATZ:
1370 case ATM_GETLOOP:
1371 case ATM_SETLOOP:
1372 case ATM_QUERYLOOP:
1373 return do_atmif_sioc(fd, cmd, arg);
1374 }
1375
1376 return -EINVAL;
1377}
1378
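The parallel *32 command numbers exist because _IOW() folds the sizeof() of its argument type into the command word: struct atmif_sioc carries a pointer, so its size, and hence the generated number, differs between the 32-bit and 64-bit ABIs, and do_atm_ioctl() has to map one onto the other. A runnable userspace illustration of the size field (the struct names here are stand-ins, not the kernel definitions):

#include <stdio.h>
#include <linux/ioctl.h>

struct sioc32 { int number; int length; unsigned int arg; };	/* 12 bytes: 32-bit pointer slot */
struct sioc64 { int number; int length; void *arg; };		/* 16 bytes when built 64-bit */

int main(void)
{
	/* same magic and number, different size field => different ioctl commands */
	printf("32-bit layout: %#lx\n", (unsigned long)_IOW('a', 1, struct sioc32));
	printf("64-bit layout: %#lx\n", (unsigned long)_IOW('a', 1, struct sioc64));
	return 0;
}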
1379static __used int
1380ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
1381{
@@ -1718,21 +1132,6 @@ static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd, unsigned long a
1718 return sys_ioctl(fd, cmd, (unsigned long)tdata);
1719}
1720
1721/* Since old style bridge ioctl's endup using SIOCDEVPRIVATE
1722 * for some operations; this forces use of the newer bridge-utils that
1723 * use compatible ioctls
1724 */
1725static int old_bridge_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
1726{
1727 u32 tmp;
1728
1729 if (get_user(tmp, (u32 __user *) arg))
1730 return -EFAULT;
1731 if (tmp == BRCTL_GET_VERSION)
1732 return BRCTL_VERSION + 1;
1733 return -EINVAL;
1734}
1735
1736#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
1737#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
1738#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
@@ -1979,18 +1378,6 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
1979COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
1980COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
1981#endif
1982/* Big T */
1983COMPATIBLE_IOCTL(TUNSETNOCSUM)
1984COMPATIBLE_IOCTL(TUNSETDEBUG)
1985COMPATIBLE_IOCTL(TUNSETPERSIST)
1986COMPATIBLE_IOCTL(TUNSETOWNER)
1987COMPATIBLE_IOCTL(TUNSETLINK)
1988COMPATIBLE_IOCTL(TUNSETGROUP)
1989COMPATIBLE_IOCTL(TUNGETFEATURES)
1990COMPATIBLE_IOCTL(TUNSETOFFLOAD)
1991COMPATIBLE_IOCTL(TUNSETTXFILTER)
1992COMPATIBLE_IOCTL(TUNGETSNDBUF)
1993COMPATIBLE_IOCTL(TUNSETSNDBUF)
1994/* Big V */
1995COMPATIBLE_IOCTL(VT_SETMODE)
1996COMPATIBLE_IOCTL(VT_GETMODE)
@@ -2032,30 +1419,6 @@ COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
2032COMPATIBLE_IOCTL(MTIOCTOP)
2033/* Socket level stuff */
2034COMPATIBLE_IOCTL(FIOQSIZE)
2035COMPATIBLE_IOCTL(FIOSETOWN)
2036COMPATIBLE_IOCTL(SIOCSPGRP)
2037COMPATIBLE_IOCTL(FIOGETOWN)
2038COMPATIBLE_IOCTL(SIOCGPGRP)
2039COMPATIBLE_IOCTL(SIOCATMARK)
2040COMPATIBLE_IOCTL(SIOCSIFLINK)
2041COMPATIBLE_IOCTL(SIOCSIFENCAP)
2042COMPATIBLE_IOCTL(SIOCGIFENCAP)
2043COMPATIBLE_IOCTL(SIOCSIFNAME)
2044COMPATIBLE_IOCTL(SIOCSARP)
2045COMPATIBLE_IOCTL(SIOCGARP)
2046COMPATIBLE_IOCTL(SIOCDARP)
2047COMPATIBLE_IOCTL(SIOCSRARP)
2048COMPATIBLE_IOCTL(SIOCGRARP)
2049COMPATIBLE_IOCTL(SIOCDRARP)
2050COMPATIBLE_IOCTL(SIOCADDDLCI)
2051COMPATIBLE_IOCTL(SIOCDELDLCI)
2052COMPATIBLE_IOCTL(SIOCGMIIPHY)
2053COMPATIBLE_IOCTL(SIOCGMIIREG)
2054COMPATIBLE_IOCTL(SIOCSMIIREG)
2055COMPATIBLE_IOCTL(SIOCGIFVLAN)
2056COMPATIBLE_IOCTL(SIOCSIFVLAN)
2057COMPATIBLE_IOCTL(SIOCBRADDBR)
2058COMPATIBLE_IOCTL(SIOCBRDELBR)
2059#ifdef CONFIG_BLOCK
2060/* SG stuff */
2061COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
@@ -2311,22 +1674,6 @@ COMPATIBLE_IOCTL(RAW_SETBIND)
2311COMPATIBLE_IOCTL(RAW_GETBIND)
2312/* SMB ioctls which do not need any translations */
2313COMPATIBLE_IOCTL(SMB_IOC_NEWCONN)
2314/* Little a */
2315COMPATIBLE_IOCTL(ATMSIGD_CTRL)
2316COMPATIBLE_IOCTL(ATMARPD_CTRL)
2317COMPATIBLE_IOCTL(ATMLEC_CTRL)
2318COMPATIBLE_IOCTL(ATMLEC_MCAST)
2319COMPATIBLE_IOCTL(ATMLEC_DATA)
2320COMPATIBLE_IOCTL(ATM_SETSC)
2321COMPATIBLE_IOCTL(SIOCSIFATMTCP)
2322COMPATIBLE_IOCTL(SIOCMKCLIP)
2323COMPATIBLE_IOCTL(ATMARP_MKIP)
2324COMPATIBLE_IOCTL(ATMARP_SETENTRY)
2325COMPATIBLE_IOCTL(ATMARP_ENCAP)
2326COMPATIBLE_IOCTL(ATMTCP_CREATE)
2327COMPATIBLE_IOCTL(ATMTCP_REMOVE)
2328COMPATIBLE_IOCTL(ATMMPC_CTRL)
2329COMPATIBLE_IOCTL(ATMMPC_DATA)
2330/* Watchdog */
2331COMPATIBLE_IOCTL(WDIOC_GETSUPPORT)
2332COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
@@ -2532,63 +1879,6 @@ COMPATIBLE_IOCTL(JSIOCGBUTTONS)
2532COMPATIBLE_IOCTL(JSIOCGNAME(0))
2533
2534/* now things that need handlers */
2535#ifdef CONFIG_NET
2536HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
2537HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
2538HANDLE_IOCTL(SIOCGIFFLAGS, dev_ifsioc)
2539HANDLE_IOCTL(SIOCSIFFLAGS, dev_ifsioc)
2540HANDLE_IOCTL(SIOCGIFMETRIC, dev_ifsioc)
2541HANDLE_IOCTL(SIOCSIFMETRIC, dev_ifsioc)
2542HANDLE_IOCTL(SIOCGIFMTU, dev_ifsioc)
2543HANDLE_IOCTL(SIOCSIFMTU, dev_ifsioc)
2544HANDLE_IOCTL(SIOCGIFMEM, dev_ifsioc)
2545HANDLE_IOCTL(SIOCSIFMEM, dev_ifsioc)
2546HANDLE_IOCTL(SIOCGIFHWADDR, dev_ifsioc)
2547HANDLE_IOCTL(SIOCSIFHWADDR, dev_ifsioc)
2548HANDLE_IOCTL(SIOCADDMULTI, dev_ifsioc)
2549HANDLE_IOCTL(SIOCDELMULTI, dev_ifsioc)
2550HANDLE_IOCTL(SIOCGIFINDEX, dev_ifsioc)
2551HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc)
2552HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
2553HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
2554HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
2555HANDLE_IOCTL(SIOCSIFHWBROADCAST, dev_ifsioc)
2556HANDLE_IOCTL(SIOCSHWTSTAMP, dev_ifsioc)
2557
2558/* ioctls used by appletalk ddp.c */
2559HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc)
2560HANDLE_IOCTL(SIOCDIFADDR, dev_ifsioc)
2561HANDLE_IOCTL(SIOCSARP, dev_ifsioc)
2562HANDLE_IOCTL(SIOCDARP, dev_ifsioc)
2563
2564HANDLE_IOCTL(SIOCGIFBRDADDR, dev_ifsioc)
2565HANDLE_IOCTL(SIOCSIFBRDADDR, dev_ifsioc)
2566HANDLE_IOCTL(SIOCGIFDSTADDR, dev_ifsioc)
2567HANDLE_IOCTL(SIOCSIFDSTADDR, dev_ifsioc)
2568HANDLE_IOCTL(SIOCGIFNETMASK, dev_ifsioc)
2569HANDLE_IOCTL(SIOCSIFNETMASK, dev_ifsioc)
2570HANDLE_IOCTL(SIOCSIFPFLAGS, dev_ifsioc)
2571HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
2572HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
2573HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
2574HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
2575HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
2576HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
2577HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
2578HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)
2579HANDLE_IOCTL(SIOCBONDSETHWADDR, bond_ioctl)
2580HANDLE_IOCTL(SIOCBONDSLAVEINFOQUERY, bond_ioctl)
2581HANDLE_IOCTL(SIOCBONDINFOQUERY, bond_ioctl)
2582HANDLE_IOCTL(SIOCBONDCHANGEACTIVE, bond_ioctl)
2583HANDLE_IOCTL(SIOCADDRT, routing_ioctl)
2584HANDLE_IOCTL(SIOCDELRT, routing_ioctl)
2585HANDLE_IOCTL(SIOCBRADDIF, dev_ifsioc)
2586HANDLE_IOCTL(SIOCBRDELIF, dev_ifsioc)
2587/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
2588HANDLE_IOCTL(SIOCRTMSG, ret_einval)
2589HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
2590HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns)
2591#endif
2592#ifdef CONFIG_BLOCK
2593HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
2594HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans)
@@ -2613,31 +1903,6 @@ HANDLE_IOCTL(KDFONTOP, do_kdfontop_ioctl)
2613/* One SMB ioctl needs translations. */
2614#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
2615HANDLE_IOCTL(SMB_IOC_GETMOUNTUID_32, do_smb_getmountuid)
2616HANDLE_IOCTL(ATM_GETLINKRATE32, do_atm_ioctl)
2617HANDLE_IOCTL(ATM_GETNAMES32, do_atm_ioctl)
2618HANDLE_IOCTL(ATM_GETTYPE32, do_atm_ioctl)
2619HANDLE_IOCTL(ATM_GETESI32, do_atm_ioctl)
2620HANDLE_IOCTL(ATM_GETADDR32, do_atm_ioctl)
2621HANDLE_IOCTL(ATM_RSTADDR32, do_atm_ioctl)
2622HANDLE_IOCTL(ATM_ADDADDR32, do_atm_ioctl)
2623HANDLE_IOCTL(ATM_DELADDR32, do_atm_ioctl)
2624HANDLE_IOCTL(ATM_GETCIRANGE32, do_atm_ioctl)
2625HANDLE_IOCTL(ATM_SETCIRANGE32, do_atm_ioctl)
2626HANDLE_IOCTL(ATM_SETESI32, do_atm_ioctl)
2627HANDLE_IOCTL(ATM_SETESIF32, do_atm_ioctl)
2628HANDLE_IOCTL(ATM_GETSTAT32, do_atm_ioctl)
2629HANDLE_IOCTL(ATM_GETSTATZ32, do_atm_ioctl)
2630HANDLE_IOCTL(ATM_GETLOOP32, do_atm_ioctl)
2631HANDLE_IOCTL(ATM_SETLOOP32, do_atm_ioctl)
2632HANDLE_IOCTL(ATM_QUERYLOOP32, do_atm_ioctl)
2633HANDLE_IOCTL(SONET_GETSTAT, do_atm_ioctl)
2634HANDLE_IOCTL(SONET_GETSTATZ, do_atm_ioctl)
2635HANDLE_IOCTL(SONET_GETDIAG, do_atm_ioctl)
2636HANDLE_IOCTL(SONET_SETDIAG, do_atm_ioctl)
2637HANDLE_IOCTL(SONET_CLRDIAG, do_atm_ioctl)
2638HANDLE_IOCTL(SONET_SETFRAMING, do_atm_ioctl)
2639HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl)
2640HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
2641/* block stuff */
2642#ifdef CONFIG_BLOCK
2643/* loop */
@@ -2655,7 +1920,7 @@ COMPATIBLE_IOCTL(TIOCSLTC)
2655#endif
2656#ifdef TIOCSTART
2657/*
2658 * For these two we have defintions in ioctls.h and/or termios.h on
1923 * For these two we have definitions in ioctls.h and/or termios.h on
2659 * some architectures but no actual implemention. Some applications
2660 * like bash call them if they are defined in the headers, so we provide
2661 * entries here to avoid syslog message spew.
@@ -2672,11 +1937,7 @@ COMPATIBLE_IOCTL(USBDEVFS_IOCTL32)
2672HANDLE_IOCTL(I2C_FUNCS, w_long)
2673HANDLE_IOCTL(I2C_RDWR, do_i2c_rdwr_ioctl)
2674HANDLE_IOCTL(I2C_SMBUS, do_i2c_smbus_ioctl)
2675/* bridge */
2676HANDLE_IOCTL(SIOCSIFBR, old_bridge_ioctl)
2677HANDLE_IOCTL(SIOCGIFBR, old_bridge_ioctl)
2678/* Not implemented in the native kernel */
2679IGNORE_IOCTL(SIOCGIFCOUNT)
2680HANDLE_IOCTL(RTC_IRQP_READ32, rtc_ioctl)
2681HANDLE_IOCTL(RTC_IRQP_SET32, rtc_ioctl)
2682HANDLE_IOCTL(RTC_EPOCH_READ32, rtc_ioctl)
@@ -2831,12 +2092,6 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
2831 goto found_handler;
2832 }
2833
2834#ifdef CONFIG_NET
2835 if (S_ISSOCK(filp->f_path.dentry->d_inode->i_mode) &&
2836 cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
2837 error = siocdevprivate_ioctl(fd, cmd, arg);
2838 } else
2839#endif
2840 {
2841 static int count;
2842
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index d22438ef7674..0d23b52dd22c 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -184,7 +184,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
184/**
185 * debugfs_create_file - create a file in the debugfs filesystem
186 * @name: a pointer to a string containing the name of the file to create.
187 * @mode: the permission that the file should have
187 * @mode: the permission that the file should have.
188 * @parent: a pointer to the parent dentry for this file. This should be a
189 * directory dentry if set. If this paramater is NULL, then the
190 * file will be created in the root of the debugfs filesystem.
@@ -195,8 +195,8 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
195 * this file.
196 *
197 * This is the basic "create a file" function for debugfs. It allows for a
198 * wide range of flexibility in createing a file, or a directory (if you
198 * wide range of flexibility in creating a file, or a directory (if you want
199 * want to create a directory, the debugfs_create_dir() function is
199 * to create a directory, the debugfs_create_dir() function is
200 * recommended to be used instead.)
201 *
202 * This function will return a pointer to a dentry if it succeeds. This
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 8b10b87dc01a..b912270942fa 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1028,9 +1028,6 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1028 if (dio->bio)
1029 dio_bio_submit(dio);
1030
1031 /* All IO is now issued, send it on its way */
1032 blk_run_address_space(inode->i_mapping);
1033
1034 /*
1035 * It is possible that, we return short IO due to end of file.
1036 * In that case, we need to release all the pages we got hold on.
@@ -1057,8 +1054,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1057 ((rw & READ) || (dio->result == dio->size)))
1058 ret = -EIOCBQUEUED;
1059
1060 if (ret != -EIOCBQUEUED)
1057 if (ret != -EIOCBQUEUED) {
1058 /* All IO is now issued, send it on its way */
1059 blk_run_address_space(inode->i_mapping);
1061 dio_await_completion(dio);
1061 }
1062
1063 /*
1064 * Sync will always be dropping the final ref and completing the
@@ -1124,7 +1124,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1124 int acquire_i_mutex = 0;
1125
1126 if (rw & WRITE)
1127 rw = WRITE_ODIRECT;
1127 rw = WRITE_ODIRECT_PLUG;
1128
1129 if (bdev)
1130 bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 16f682e26c07..b540aa5d1f61 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -143,7 +143,7 @@ out:
143}
144EXPORT_SYMBOL_GPL(dlm_posix_lock);
145
146/* Returns failure iff a succesful lock operation should be canceled */
146/* Returns failure iff a successful lock operation should be canceled */
147static int dlm_plock_callback(struct plock_op *op)
148{
149 struct file *file;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 085c5c063420..366c503f9657 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -251,10 +251,10 @@ ctl_table epoll_table[] = {
251 .data = &max_user_watches,
252 .maxlen = sizeof(int),
253 .mode = 0644,
254 .proc_handler = &proc_dointvec_minmax,
254 .proc_handler = proc_dointvec_minmax,
255 .extra1 = &zero,
256 },
257 { .ctl_name = 0 }
257 { }
258};
259#endif /* CONFIG_SYSCTL */
260
diff --git a/fs/exec.c b/fs/exec.c
index ba112bd4a339..c0c636e34f60 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -46,7 +46,6 @@
46#include <linux/proc_fs.h>
47#include <linux/mount.h>
48#include <linux/security.h>
49#include <linux/ima.h>
50#include <linux/syscalls.h>
51#include <linux/tsacct_kern.h>
52#include <linux/cn_proc.h>
@@ -1209,9 +1208,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1209 retval = security_bprm_check(bprm);
1210 if (retval)
1211 return retval;
1212 retval = ima_bprm_check(bprm);
1213 if (retval)
1214 return retval;
1215
1216 /* kernel module loader fixup */
1217 /* so we don't try to load run modprobe in kernel space. */
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 354ed3b47b30..2db957778903 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2033,7 +2033,7 @@ static Indirect *ext3_find_shared(struct inode *inode, int depth,
2033 int k, err;
2034
2035 *top = 0;
2036 /* Make k index the deepest non-null offest + 1 */
2036 /* Make k index the deepest non-null offset + 1 */
2037 for (k = depth; k > 1 && !offsets[k-1]; k--)
2038 ;
2039 partial = ext3_get_branch(inode, k, offsets, chain, &err);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2c8caa51addb..4e8e2f15b8bd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2933,7 +2933,7 @@ retry:
2933 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
2934 &mpd);
2935 /*
2936 * If we have a contigous extent of pages and we
2936 * If we have a contiguous extent of pages and we
2937 * haven't done the I/O yet, map the blocks and submit
2938 * them for I/O.
2939 */
@@ -4064,7 +4064,7 @@ static Indirect *ext4_find_shared(struct inode *inode, int depth,
4064 int k, err;
4065
4066 *top = 0;
4067 /* Make k index the deepest non-null offest + 1 */
4067 /* Make k index the deepest non-null offset + 1 */
4068 for (k = depth; k > 1 && !offsets[k-1]; k--)
4069 ;
4070 partial = ext4_get_branch(inode, k, offsets, chain, &err);
@@ -5376,7 +5376,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5376 * worse case, the indexs blocks spread over different block groups
5377 *
5378 * If datablocks are discontiguous, they are possible to spread over
5379 * different block groups too. If they are contiugous, with flexbg,
5379 * different block groups too. If they are contiguous, with flexbg,
5380 * they could still across block group boundary.
5381 *
5382 * Also account for superblock, inode, quota and xattr blocks
@@ -5452,7 +5452,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
5452 * Calculate the journal credits for a chunk of data modification.
5453 *
5454 * This is called from DIO, fallocate or whoever calling
5455 * ext4_get_blocks() to map/allocate a chunk of contigous disk blocks.
5455 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
5456 *
5457 * journal buffers for data blocks are not included here, as DIO
5458 * and fallocate do no need to journal data buffers.
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index bba12824defa..74e495dabe09 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -142,7 +142,7 @@
142 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
143 * value of s_mb_order2_reqs can be tuned via
144 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
145 * stripe size (sbi->s_stripe), we try to search for contigous block in
145 * stripe size (sbi->s_stripe), we try to search for contiguous block in
146 * stripe size. This should result in better allocation on RAID setups. If
147 * not, we search in the specific group using bitmap for best extents. The
148 * tunable min_to_scan and max_to_scan control the behaviour here.
diff --git a/fs/file_table.c b/fs/file_table.c
index 8eb44042e009..4bef4c01ec6f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -13,7 +13,6 @@
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/security.h>
16#include <linux/ima.h>
17#include <linux/eventpoll.h>
18#include <linux/rcupdate.h>
19#include <linux/mount.h>
@@ -280,7 +279,6 @@ void __fput(struct file *file)
280 if (file->f_op && file->f_op->release)
281 file->f_op->release(inode, file);
282 security_file_free(file);
283 ima_file_free(file);
284 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
285 cdev_put(inode->i_cdev);
286 fops_put(file->f_op);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 9d5360c4c2af..49bc1b8e8f19 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -614,7 +614,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
614 struct writeback_control *wbc)
615{
616 struct super_block *sb = wbc->sb, *pin_sb = NULL;
617 const int is_blkdev_sb = sb_is_blkdev_sb(sb);
618 const unsigned long start = jiffies; /* livelock avoidance */
619
620 spin_lock(&inode_lock);
@@ -635,36 +634,11 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
635 continue;
636 }
637
638 if (!bdi_cap_writeback_dirty(wb->bdi)) {
639 redirty_tail(inode);
640 if (is_blkdev_sb) {
641 /*
642 * Dirty memory-backed blockdev: the ramdisk
643 * driver does this. Skip just this inode
644 */
645 continue;
646 }
647 /*
648 * Dirty memory-backed inode against a filesystem other
649 * than the kernel-internal bdev filesystem. Skip the
650 * entire superblock.
651 */
652 break;
653 }
654
655 if (inode->i_state & (I_NEW | I_WILL_FREE)) {
656 requeue_io(inode);
657 continue;
658 }
659
660 if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
661 wbc->encountered_congestion = 1;
662 if (!is_blkdev_sb)
663 break; /* Skip a congested fs */
664 requeue_io(inode);
665 continue; /* Skip a congested blockdev */
666 }
667
668 /*
669 * Was this inode dirtied after sync_sb_inodes was called?
670 * This keeps sync from extra jobs and livelock.
@@ -756,6 +730,7 @@ static long wb_writeback(struct bdi_writeback *wb,
756 .sync_mode = args->sync_mode,
757 .older_than_this = NULL,
758 .for_kupdate = args->for_kupdate,
733 .for_background = args->for_background,
759 .range_cyclic = args->range_cyclic,
760 };
761 unsigned long oldest_jif;
@@ -787,7 +762,6 @@ static long wb_writeback(struct bdi_writeback *wb,
787 break;
788
789 wbc.more_io = 0;
790 wbc.encountered_congestion = 0;
791 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
792 wbc.pages_skipped = 0;
793 writeback_inodes_wb(wb, &wbc);
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index 9bbb8ce7bea0..864dac20a242 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -54,3 +54,10 @@ config FSCACHE_DEBUG
54 enabled by setting bits in /sys/modules/fscache/parameter/debug.
55
56 See Documentation/filesystems/caching/fscache.txt for more information.
57
58config FSCACHE_OBJECT_LIST
59 bool "Maintain global object list for debugging purposes"
60 depends on FSCACHE && PROC_FS
61 help
62 Maintain a global list of active fscache objects that can be
63 retrieved through /proc/fs/fscache/objects for debugging purposes
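As implemented by object-list.c further down, the resulting /proc/fs/fscache/objects listing is filtered through a user key named "fscache:objlist": fscache_objlist_config() reads single-letter flags from the key payload (K for object keys, A for aux data, B/b for busy/idle objects, and so on), so something along the lines of keyctl add user fscache:objlist KB @s would presumably restrict the output to busy objects with their keys shown.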
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
index 91571b95aacc..6d561531cb36 100644
--- a/fs/fscache/Makefile
+++ b/fs/fscache/Makefile
@@ -15,5 +15,6 @@ fscache-y := \
15fscache-$(CONFIG_PROC_FS) += proc.o
16fscache-$(CONFIG_FSCACHE_STATS) += stats.o
17fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
18fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
18
19obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index e21985bbb1fb..6a3c48abd677 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache,
263 spin_lock(&cache->object_list_lock);
264 list_add_tail(&ifsdef->cache_link, &cache->object_list);
265 spin_unlock(&cache->object_list_lock);
266 fscache_objlist_add(ifsdef);
266
267 /* add the cache's netfs definition index object to the top level index
268 * cookie as a known backing object */
@@ -380,11 +381,15 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
380
381 /* make sure all pages pinned by operations on behalf of the netfs are
382 * written to disk */
384 fscache_stat(&fscache_n_cop_sync_cache);
383 cache->ops->sync_cache(cache);
386 fscache_stat_d(&fscache_n_cop_sync_cache);
384
385 /* dissociate all the netfs pages backed by this cache from the block
386 * mappings in the cache */
390 fscache_stat(&fscache_n_cop_dissociate_pages);
387 cache->ops->dissociate_pages(cache);
392 fscache_stat_d(&fscache_n_cop_dissociate_pages);
388
389 /* we now have to destroy all the active objects pertaining to this
390 * cache - which we do by passing them off to thread pool to be
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 72fd18f6c71f..990535071a8a 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie)
36
37 memset(cookie, 0, sizeof(*cookie));
38 spin_lock_init(&cookie->lock);
39 spin_lock_init(&cookie->stores_lock);
39 INIT_HLIST_HEAD(&cookie->backing_objects);
40}
41
@@ -102,7 +103,9 @@ struct fscache_cookie *__fscache_acquire_cookie(
102 cookie->netfs_data = netfs_data;
103 cookie->flags = 0;
104
105 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS);
106 /* radix tree insertion won't use the preallocation pool unless it's
107 * told it may not wait */
108 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);
106
107 switch (cookie->def->type) {
108 case FSCACHE_COOKIE_TYPE_INDEX:
@@ -249,7 +252,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
249
250 /* ask the cache to allocate an object (we may end up with duplicate
251 * objects at this stage, but we sort that out later) */
255 fscache_stat(&fscache_n_cop_alloc_object);
252 object = cache->ops->alloc_object(cache, cookie);
257 fscache_stat_d(&fscache_n_cop_alloc_object);
253 if (IS_ERR(object)) {
254 fscache_stat(&fscache_n_object_no_alloc);
255 ret = PTR_ERR(object);
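The GFP_NOFS & ~__GFP_WAIT change in the first hunk above relies on the radix tree contract that radix_tree_insert() only falls back to the per-CPU preload pool when the tree's gfp mask forbids sleeping. The matching caller-side pairing, sketched generically rather than quoted from this patch, looks like:

	if (radix_tree_preload(GFP_NOFS) < 0)	/* may sleep; stocks the per-CPU pool */
		return -ENOMEM;
	spin_lock(&cookie->stores_lock);
	err = radix_tree_insert(&cookie->stores, index, page);	/* must not sleep here */
	spin_unlock(&cookie->stores_lock);
	radix_tree_preload_end();		/* re-enables preemption */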
@@ -270,8 +275,11 @@ static int fscache_alloc_object(struct fscache_cache *cache,
270 /* only attach if we managed to allocate all we needed, otherwise
271 * discard the object we just allocated and instead use the one
272 * attached to the cookie */
273 if (fscache_attach_object(cookie, object) < 0)
278 if (fscache_attach_object(cookie, object) < 0) {
279 fscache_stat(&fscache_n_cop_put_object);
274 cache->ops->put_object(object);
281 fscache_stat_d(&fscache_n_cop_put_object);
282 }
275
276 _leave(" = 0");
277 return 0;
@@ -287,7 +295,9 @@ object_already_extant:
287 return 0;
288
289error_put:
298 fscache_stat(&fscache_n_cop_put_object);
290 cache->ops->put_object(object);
300 fscache_stat_d(&fscache_n_cop_put_object);
291error:
292 _leave(" = %d", ret);
293 return ret;
@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
349 object->cookie = cookie;
350 atomic_inc(&cookie->usage);
351 hlist_add_head(&object->cookie_link, &cookie->backing_objects);
362
363 fscache_objlist_add(object);
352 ret = 0;
353
354cant_attach_object:
@@ -403,6 +415,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
403 unsigned long event;
404
405 fscache_stat(&fscache_n_relinquishes);
418 if (retire)
419 fscache_stat(&fscache_n_relinquishes_retire);
406
407 if (!cookie) {
408 fscache_stat(&fscache_n_relinquishes_null);
@@ -428,12 +442,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
428
429 event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
430
431 /* detach pointers back to the netfs */
432 spin_lock(&cookie->lock);
433
434 cookie->netfs_data = NULL;
435 cookie->def = NULL;
436
437 /* break links with all the active objects */
438 while (!hlist_empty(&cookie->backing_objects)) {
439 object = hlist_entry(cookie->backing_objects.first,
@@ -456,6 +466,10 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
456 BUG();
457 }
458
469 /* detach pointers back to the netfs */
470 cookie->netfs_data = NULL;
471 cookie->def = NULL;
472
459 spin_unlock(&cookie->lock);
460
461 if (cookie->parent) {
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 1c341304621f..edd7434ab6e5 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -17,6 +17,7 @@
17 * - cache->object_list_lock
18 * - object->lock
19 * - object->parent->lock
20 * - cookie->stores_lock
20 * - fscache_thread_lock
21 *
22 */
@@ -88,17 +89,31 @@ extern int fscache_wait_bit_interruptible(void *);
88/*
89 * object.c
90 */
92extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
93
91extern void fscache_withdrawing_object(struct fscache_cache *,
92 struct fscache_object *);
93extern void fscache_enqueue_object(struct fscache_object *);
94
95/*
99 * object-list.c
100 */
101#ifdef CONFIG_FSCACHE_OBJECT_LIST
102extern const struct file_operations fscache_objlist_fops;
103
104extern void fscache_objlist_add(struct fscache_object *);
105#else
106#define fscache_objlist_add(object) do {} while(0)
107#endif
108
109/*
96 * operation.c
97 */
98extern int fscache_submit_exclusive_op(struct fscache_object *,
99 struct fscache_operation *);
100extern int fscache_submit_op(struct fscache_object *,
101 struct fscache_operation *);
116extern int fscache_cancel_op(struct fscache_operation *);
102extern void fscache_abort_object(struct fscache_object *);
103extern void fscache_start_operations(struct fscache_object *);
104extern void fscache_operation_gc(struct work_struct *);
@@ -127,6 +142,8 @@ extern atomic_t fscache_n_op_enqueue;
127extern atomic_t fscache_n_op_deferred_release;
128extern atomic_t fscache_n_op_release;
129extern atomic_t fscache_n_op_gc;
145extern atomic_t fscache_n_op_cancelled;
146extern atomic_t fscache_n_op_rejected;
130
131extern atomic_t fscache_n_attr_changed;
132extern atomic_t fscache_n_attr_changed_ok;
@@ -138,6 +155,8 @@ extern atomic_t fscache_n_allocs;
138extern atomic_t fscache_n_allocs_ok;
139extern atomic_t fscache_n_allocs_wait;
140extern atomic_t fscache_n_allocs_nobufs;
158extern atomic_t fscache_n_allocs_intr;
159extern atomic_t fscache_n_allocs_object_dead;
141extern atomic_t fscache_n_alloc_ops;
142extern atomic_t fscache_n_alloc_op_waits;
143
@@ -148,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata;
148extern atomic_t fscache_n_retrievals_nobufs;
149extern atomic_t fscache_n_retrievals_intr;
150extern atomic_t fscache_n_retrievals_nomem;
170extern atomic_t fscache_n_retrievals_object_dead;
151extern atomic_t fscache_n_retrieval_ops;
152extern atomic_t fscache_n_retrieval_op_waits;
153
@@ -158,6 +178,14 @@ extern atomic_t fscache_n_stores_nobufs;
158extern atomic_t fscache_n_stores_oom;
159extern atomic_t fscache_n_store_ops;
160extern atomic_t fscache_n_store_calls;
181extern atomic_t fscache_n_store_pages;
182extern atomic_t fscache_n_store_radix_deletes;
183extern atomic_t fscache_n_store_pages_over_limit;
184
185extern atomic_t fscache_n_store_vmscan_not_storing;
186extern atomic_t fscache_n_store_vmscan_gone;
187extern atomic_t fscache_n_store_vmscan_busy;
188extern atomic_t fscache_n_store_vmscan_cancelled;
161
162extern atomic_t fscache_n_marks;
163extern atomic_t fscache_n_uncaches;
@@ -176,6 +204,7 @@ extern atomic_t fscache_n_updates_run;
176extern atomic_t fscache_n_relinquishes;
177extern atomic_t fscache_n_relinquishes_null;
178extern atomic_t fscache_n_relinquishes_waitcrt;
207extern atomic_t fscache_n_relinquishes_retire;
179
180extern atomic_t fscache_n_cookie_index;
181extern atomic_t fscache_n_cookie_data;
@@ -186,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc;
186extern atomic_t fscache_n_object_lookups;
187extern atomic_t fscache_n_object_lookups_negative;
188extern atomic_t fscache_n_object_lookups_positive;
218extern atomic_t fscache_n_object_lookups_timed_out;
189extern atomic_t fscache_n_object_created;
190extern atomic_t fscache_n_object_avail;
191extern atomic_t fscache_n_object_dead;
@@ -195,15 +225,41 @@ extern atomic_t fscache_n_checkaux_okay;
195extern atomic_t fscache_n_checkaux_update;
196extern atomic_t fscache_n_checkaux_obsolete;
197
228extern atomic_t fscache_n_cop_alloc_object;
229extern atomic_t fscache_n_cop_lookup_object;
230extern atomic_t fscache_n_cop_lookup_complete;
231extern atomic_t fscache_n_cop_grab_object;
232extern atomic_t fscache_n_cop_update_object;
233extern atomic_t fscache_n_cop_drop_object;
234extern atomic_t fscache_n_cop_put_object;
235extern atomic_t fscache_n_cop_sync_cache;
236extern atomic_t fscache_n_cop_attr_changed;
237extern atomic_t fscache_n_cop_read_or_alloc_page;
238extern atomic_t fscache_n_cop_read_or_alloc_pages;
239extern atomic_t fscache_n_cop_allocate_page;
240extern atomic_t fscache_n_cop_allocate_pages;
241extern atomic_t fscache_n_cop_write_page;
242extern atomic_t fscache_n_cop_uncache_page;
243extern atomic_t fscache_n_cop_dissociate_pages;
244
198static inline void fscache_stat(atomic_t *stat)
199{
200 atomic_inc(stat);
201}
202
250static inline void fscache_stat_d(atomic_t *stat)
251{
252 atomic_dec(stat);
253}
254
255#define __fscache_stat(stat) (stat)
256
203extern const struct file_operations fscache_stats_fops;
204#else
205
260#define __fscache_stat(stat) (NULL)
206#define fscache_stat(stat) do {} while (0)
262#define fscache_stat_d(stat) do {} while (0)
207#endif
208
209/*
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 4de41b597499..add6bdb53f04 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
48{
49 int ret;
50
51 ret = slow_work_register_user();
51 ret = slow_work_register_user(THIS_MODULE);
52 if (ret < 0)
53 goto error_slow_work;
54
@@ -80,7 +80,7 @@ error_kobj:
80error_cookie_jar:
81 fscache_proc_cleanup();
82error_proc:
83 slow_work_unregister_user();
83 slow_work_unregister_user(THIS_MODULE);
84error_slow_work:
85 return ret;
86}
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
97 kobject_put(fscache_root);
98 kmem_cache_destroy(fscache_cookie_jar);
99 fscache_proc_cleanup();
100 slow_work_unregister_user();
100 slow_work_unregister_user(THIS_MODULE);
101 printk(KERN_NOTICE "FS-Cache: Unloaded\n");
102}
103
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
new file mode 100644
index 000000000000..e590242fa41a
--- /dev/null
+++ b/fs/fscache/object-list.c
@@ -0,0 +1,432 @@
1/* Global fscache object list maintainer and viewer
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define FSCACHE_DEBUG_LEVEL COOKIE
13#include <linux/module.h>
14#include <linux/seq_file.h>
15#include <linux/key.h>
16#include <keys/user-type.h>
17#include "internal.h"
18
19static struct rb_root fscache_object_list;
20static DEFINE_RWLOCK(fscache_object_list_lock);
21
22struct fscache_objlist_data {
23 unsigned long config; /* display configuration */
24#define FSCACHE_OBJLIST_CONFIG_KEY 0x00000001 /* show object keys */
25#define FSCACHE_OBJLIST_CONFIG_AUX 0x00000002 /* show object auxdata */
26#define FSCACHE_OBJLIST_CONFIG_COOKIE 0x00000004 /* show objects with cookies */
27#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE 0x00000008 /* show objects without cookies */
28#define FSCACHE_OBJLIST_CONFIG_BUSY 0x00000010 /* show busy objects */
29#define FSCACHE_OBJLIST_CONFIG_IDLE 0x00000020 /* show idle objects */
30#define FSCACHE_OBJLIST_CONFIG_PENDWR 0x00000040 /* show objects with pending writes */
31#define FSCACHE_OBJLIST_CONFIG_NOPENDWR 0x00000080 /* show objects without pending writes */
32#define FSCACHE_OBJLIST_CONFIG_READS 0x00000100 /* show objects with active reads */
33#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */
34#define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */
35#define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without no events */
36#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with slow work */
37#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without slow work */
38
39 u8 buf[512]; /* key and aux data buffer */
40};
41
42/*
43 * Add an object to the object list
44 * - we use the address of the fscache_object structure as the key into the
45 * tree
46 */
47void fscache_objlist_add(struct fscache_object *obj)
48{
49 struct fscache_object *xobj;
50 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
51
52 write_lock(&fscache_object_list_lock);
53
54 while (*p) {
55 parent = *p;
56 xobj = rb_entry(parent, struct fscache_object, objlist_link);
57
58 if (obj < xobj)
59 p = &(*p)->rb_left;
60 else if (obj > xobj)
61 p = &(*p)->rb_right;
62 else
63 BUG();
64 }
65
66 rb_link_node(&obj->objlist_link, parent, p);
67 rb_insert_color(&obj->objlist_link, &fscache_object_list);
68
69 write_unlock(&fscache_object_list_lock);
70}
71
72/**
73 * fscache_object_destroy - Note that a cache object is about to be destroyed
74 * @object: The object to be destroyed
75 *
76 * Note the imminent destruction and deallocation of a cache object record.
77 */
78void fscache_object_destroy(struct fscache_object *obj)
79{
80 write_lock(&fscache_object_list_lock);
81
82 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
83 rb_erase(&obj->objlist_link, &fscache_object_list);
84
85 write_unlock(&fscache_object_list_lock);
86}
87EXPORT_SYMBOL(fscache_object_destroy);
88
89/*
90 * find the object in the tree on or after the specified index
91 */
92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
93{
94 struct fscache_object *pobj, *obj, *minobj = NULL;
95 struct rb_node *p;
96 unsigned long pos;
97
98 if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
99 return NULL;
100 pos = *_pos;
101
102 /* banners (can't represent line 0 by pos 0 as that would involve
103 * returning a NULL pointer) */
104 if (pos == 0)
105 return (struct fscache_object *) ++(*_pos);
106 if (pos < 3)
107 return (struct fscache_object *)pos;
108
109 pobj = (struct fscache_object *)pos;
110 p = fscache_object_list.rb_node;
111 while (p) {
112 obj = rb_entry(p, struct fscache_object, objlist_link);
113 if (pobj < obj) {
114 if (!minobj || minobj > obj)
115 minobj = obj;
116 p = p->rb_left;
117 } else if (pobj > obj) {
118 p = p->rb_right;
119 } else {
120 minobj = obj;
121 break;
122 }
123 obj = NULL;
124 }
125
126 if (!minobj)
127 *_pos = (unsigned long) ERR_PTR(-ENOENT);
128 else if (minobj != obj)
129 *_pos = (unsigned long) minobj;
130 return minobj;
131}
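fscache_objlist_lookup() smuggles the two banner lines through the seq_file iterator by returning the fake pointers 1 and 2 for positions below 3 (returning NULL would terminate the walk), and fscache_objlist_show() checks for those magic values before treating v as a real object. The idiom, reduced to a skeleton with a hypothetical lookup helper:

	static void *objlist_start_sketch(struct seq_file *m, loff_t *_pos)
	{
		if (*_pos == 0)			/* header line; pos 0 cannot be used */
			return (void *)(unsigned long)++(*_pos);
		if (*_pos < 3)			/* underline banner */
			return (void *)(unsigned long)*_pos;
		return lookup_real_object(_pos);	/* hypothetical: walk the rb-tree */
	}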
132
133/*
134 * set up the iterator to start reading from the first line
135 */
136static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
137 __acquires(&fscache_object_list_lock)
138{
139 read_lock(&fscache_object_list_lock);
140 return fscache_objlist_lookup(_pos);
141}
142
143/*
144 * move to the next line
145 */
146static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
147{
148 (*_pos)++;
149 return fscache_objlist_lookup(_pos);
150}
151
152/*
153 * clean up after reading
154 */
155static void fscache_objlist_stop(struct seq_file *m, void *v)
156 __releases(&fscache_object_list_lock)
157{
158 read_unlock(&fscache_object_list_lock);
159}
160
161/*
162 * display an object
163 */
164static int fscache_objlist_show(struct seq_file *m, void *v)
165{
166 struct fscache_objlist_data *data = m->private;
167 struct fscache_object *obj = v;
168 unsigned long config = data->config;
169 uint16_t keylen, auxlen;
170 char _type[3], *type;
171 bool no_cookie;
172 u8 *buf = data->buf, *p;
173
174 if ((unsigned long) v == 1) {
175 seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS"
176 " EM EV F S"
177 " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
178 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
179 FSCACHE_OBJLIST_CONFIG_AUX))
180 seq_puts(m, " ");
181 if (config & FSCACHE_OBJLIST_CONFIG_KEY)
182 seq_puts(m, "OBJECT_KEY");
183 if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
184 FSCACHE_OBJLIST_CONFIG_AUX)) ==
185 (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
186 seq_puts(m, ", ");
187 if (config & FSCACHE_OBJLIST_CONFIG_AUX)
188 seq_puts(m, "AUX_DATA");
189 seq_puts(m, "\n");
190 return 0;
191 }
192
193 if ((unsigned long) v == 2) {
194 seq_puts(m, "======== ======== ==== ===== === === === == ====="
195 " == == = ="
196 " | ================ == == ================");
197 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
198 FSCACHE_OBJLIST_CONFIG_AUX))
199 seq_puts(m, " ================");
200 seq_puts(m, "\n");
201 return 0;
202 }
203
204 /* filter out any unwanted objects */
205#define FILTER(criterion, _yes, _no) \
206 do { \
207 unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \
208 unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \
209 if (criterion) { \
210 if (!(config & yes)) \
211 return 0; \
212 } else { \
213 if (!(config & no)) \
214 return 0; \
215 } \
216 } while(0)

	if (~config) {
		FILTER(obj->cookie,
		       COOKIE, NOCOOKIE);
		FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
		       obj->n_ops != 0 ||
		       obj->n_obj_ops != 0 ||
		       obj->flags ||
		       !list_empty(&obj->dependents),
		       BUSY, IDLE);
		FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
		       PENDWR, NOPENDWR);
		FILTER(atomic_read(&obj->n_reads),
		       READS, NOREADS);
		FILTER(obj->events & obj->event_mask,
		       EVENTS, NOEVENTS);
		FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
		       WORK, NOWORK);
	}

	seq_printf(m,
		   "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
		   obj->debug_id,
		   obj->parent ? obj->parent->debug_id : -1,
		   fscache_object_states_short[obj->state],
		   obj->n_children,
		   obj->n_ops,
		   obj->n_obj_ops,
		   obj->n_in_progress,
		   obj->n_exclusive,
		   atomic_read(&obj->n_reads),
		   obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
		   obj->events,
		   obj->flags,
		   obj->work.flags);

	no_cookie = true;
	keylen = auxlen = 0;
	if (obj->cookie) {
		spin_lock(&obj->lock);
		if (obj->cookie) {
			switch (obj->cookie->def->type) {
			case 0:
				type = "IX";
				break;
			case 1:
				type = "DT";
				break;
			default:
				sprintf(_type, "%02u",
					obj->cookie->def->type);
				type = _type;
				break;
			}

			seq_printf(m, "%-16s %s %2lx %16p",
				   obj->cookie->def->name,
				   type,
				   obj->cookie->flags,
				   obj->cookie->netfs_data);

			if (obj->cookie->def->get_key &&
			    config & FSCACHE_OBJLIST_CONFIG_KEY)
				keylen = obj->cookie->def->get_key(
					obj->cookie->netfs_data,
					buf, 400);

			if (obj->cookie->def->get_aux &&
			    config & FSCACHE_OBJLIST_CONFIG_AUX)
				auxlen = obj->cookie->def->get_aux(
					obj->cookie->netfs_data,
					buf + keylen, 512 - keylen);

			no_cookie = false;
		}
		spin_unlock(&obj->lock);

		if (!no_cookie && (keylen > 0 || auxlen > 0)) {
			seq_printf(m, " ");
			for (p = buf; keylen > 0; keylen--)
				seq_printf(m, "%02x", *p++);
			if (auxlen > 0) {
				if (config & FSCACHE_OBJLIST_CONFIG_KEY)
					seq_printf(m, ", ");
				for (; auxlen > 0; auxlen--)
					seq_printf(m, "%02x", *p++);
			}
		}
	}

	if (no_cookie)
		seq_printf(m, "<no_cookie>\n");
	else
		seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations fscache_objlist_ops = {
	.start		= fscache_objlist_start,
	.stop		= fscache_objlist_stop,
	.next		= fscache_objlist_next,
	.show		= fscache_objlist_show,
};

/*
 * get the configuration for filtering the list
 */
static void fscache_objlist_config(struct fscache_objlist_data *data)
{
#ifdef CONFIG_KEYS
	struct user_key_payload *confkey;
	unsigned long config;
	struct key *key;
	const char *buf;
	int len;

	key = request_key(&key_type_user, "fscache:objlist", NULL);
	if (IS_ERR(key))
		goto no_config;

	config = 0;
	rcu_read_lock();

	confkey = key->payload.data;
	buf = confkey->data;

	for (len = confkey->datalen - 1; len >= 0; len--) {
		switch (buf[len]) {
		case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY;		break;
		case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX;		break;
		case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE;	break;
		case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE;	break;
		case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY;	break;
		case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE;	break;
		case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR;	break;
		case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR;	break;
		case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS;	break;
		case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS;	break;
		case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK;	break;
		case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK;	break;
		}
	}

	rcu_read_unlock();
	key_put(key);

	if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
		config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
		config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
		config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
		config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
		config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
	if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
		config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;

	data->config = config;
	return;

no_config:
#endif
	data->config = ULONG_MAX;
}
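/* Editorial note (not in the original source): because request_key() above
 * searches the caller's keyrings for a plain "user"-type key named
 * "fscache:objlist", the filter can be primed from userspace before the
 * proc file is opened, e.g. with keyutils, something like:
 *
 *	keyctl add user fscache:objlist "KB" @s
 *
 * which would request the object-key column ('K') and busy objects only
 * ('B'); the letters are those parsed in the switch above. */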

/*
 * open "/proc/fs/fscache/objects" to provide a list of active objects
 * - can be configured by a user-defined key added to the caller's keyrings
 */
static int fscache_objlist_open(struct inode *inode, struct file *file)
{
	struct fscache_objlist_data *data;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &fscache_objlist_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;

	/* buffer for key extraction */
	data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL);
	if (!data) {
		seq_release(inode, file);
		return -ENOMEM;
	}

	/* get the configuration key */
	fscache_objlist_config(data);

	m->private = data;
	return 0;
}

/*
 * clean up on close
 */
static int fscache_objlist_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	kfree(m->private);
	m->private = NULL;
	return seq_release(inode, file);
}

const struct file_operations fscache_objlist_fops = {
	.owner		= THIS_MODULE,
	.open		= fscache_objlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= fscache_objlist_release,
};
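A hedged userspace sketch of driving this interface: the "user" key type and
the /proc/fs/fscache/objects path come from the code above, while the use of
libkeyutils' add_key() wrapper is an assumption about available tooling, not
something this file mandates.

/* sketch: prime the fscache:objlist filter, then dump the object list;
 * assumes libkeyutils (link with -lkeyutils) */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char line[256];
	FILE *f;

	/* 'K' = show object keys, 'B' = busy objects only; the letters are
	 * parsed by fscache_objlist_config() above */
	if (add_key("user", "fscache:objlist", "KB", 2,
		    KEY_SPEC_SESSION_KEYRING) == -1) {
		perror("add_key");
		return EXIT_FAILURE;
	}

	f = fopen("/proc/fs/fscache/objects", "r");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}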
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 392a41b1b79d..e513ac599c8e 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -14,9 +14,10 @@
 
 #define FSCACHE_DEBUG_LEVEL COOKIE
 #include <linux/module.h>
+#include <linux/seq_file.h>
 #include "internal.h"
 
-const char *fscache_object_states[] = {
+const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
 	[FSCACHE_OBJECT_INIT]		= "OBJECT_INIT",
 	[FSCACHE_OBJECT_LOOKING_UP]	= "OBJECT_LOOKING_UP",
 	[FSCACHE_OBJECT_CREATING]	= "OBJECT_CREATING",
@@ -33,9 +34,28 @@ const char *fscache_object_states[] = {
 };
 EXPORT_SYMBOL(fscache_object_states);
 
+const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
+	[FSCACHE_OBJECT_INIT]		= "INIT",
+	[FSCACHE_OBJECT_LOOKING_UP]	= "LOOK",
+	[FSCACHE_OBJECT_CREATING]	= "CRTN",
+	[FSCACHE_OBJECT_AVAILABLE]	= "AVBL",
+	[FSCACHE_OBJECT_ACTIVE]		= "ACTV",
+	[FSCACHE_OBJECT_UPDATING]	= "UPDT",
+	[FSCACHE_OBJECT_DYING]		= "DYNG",
+	[FSCACHE_OBJECT_LC_DYING]	= "LCDY",
+	[FSCACHE_OBJECT_ABORT_INIT]	= "ABTI",
+	[FSCACHE_OBJECT_RELEASING]	= "RELS",
+	[FSCACHE_OBJECT_RECYCLING]	= "RCYC",
+	[FSCACHE_OBJECT_WITHDRAWING]	= "WTHD",
+	[FSCACHE_OBJECT_DEAD]		= "DEAD",
+};
+
 static void fscache_object_slow_work_put_ref(struct slow_work *);
 static int fscache_object_slow_work_get_ref(struct slow_work *);
 static void fscache_object_slow_work_execute(struct slow_work *);
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
+#endif
 static void fscache_initialise_object(struct fscache_object *);
 static void fscache_lookup_object(struct fscache_object *);
 static void fscache_object_available(struct fscache_object *);
@@ -45,9 +65,13 @@ static void fscache_enqueue_dependents(struct fscache_object *);
 static void fscache_dequeue_object(struct fscache_object *);
 
 const struct slow_work_ops fscache_object_slow_work_ops = {
+	.owner		= THIS_MODULE,
 	.get_ref	= fscache_object_slow_work_get_ref,
 	.put_ref	= fscache_object_slow_work_put_ref,
 	.execute	= fscache_object_slow_work_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+	.desc		= fscache_object_slow_work_desc,
+#endif
 };
 EXPORT_SYMBOL(fscache_object_slow_work_ops);
 
@@ -81,6 +105,7 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
 static void fscache_object_state_machine(struct fscache_object *object)
 {
 	enum fscache_object_state new_state;
+	struct fscache_cookie *cookie;
 
 	ASSERT(object != NULL);
 
@@ -120,20 +145,31 @@ static void fscache_object_state_machine(struct fscache_object *object)
 	case FSCACHE_OBJECT_UPDATING:
 		clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
 		fscache_stat(&fscache_n_updates_run);
+		fscache_stat(&fscache_n_cop_update_object);
 		object->cache->ops->update_object(object);
+		fscache_stat_d(&fscache_n_cop_update_object);
 		goto active_transit;
 
 		/* handle an object dying during lookup or creation */
 	case FSCACHE_OBJECT_LC_DYING:
 		object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
+		fscache_stat(&fscache_n_cop_lookup_complete);
 		object->cache->ops->lookup_complete(object);
+		fscache_stat_d(&fscache_n_cop_lookup_complete);
 
 		spin_lock(&object->lock);
 		object->state = FSCACHE_OBJECT_DYING;
-		if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
-				       &object->cookie->flags))
-			wake_up_bit(&object->cookie->flags,
-				    FSCACHE_COOKIE_CREATING);
+		cookie = object->cookie;
+		if (cookie) {
+			if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
+					       &cookie->flags))
+				wake_up_bit(&cookie->flags,
+					    FSCACHE_COOKIE_LOOKING_UP);
+			if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
					       &cookie->flags))
+				wake_up_bit(&cookie->flags,
+					    FSCACHE_COOKIE_CREATING);
+		}
 		spin_unlock(&object->lock);
 
 		fscache_done_parent_op(object);
@@ -165,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
 		}
 		spin_unlock(&object->lock);
 		fscache_enqueue_dependents(object);
+		fscache_start_operations(object);
 		goto terminal_transit;
 
 		/* handle an abort during initialisation */
@@ -316,14 +353,29 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
 
 	_enter("{OBJ%x}", object->debug_id);
 
-	clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
-
 	start = jiffies;
 	fscache_object_state_machine(object);
 	fscache_hist(fscache_objs_histogram, start);
 	if (object->events & object->event_mask)
 		fscache_enqueue_object(object);
+	clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+}
+
+/*
+ * describe an object for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *work,
+					  struct seq_file *m)
+{
+	struct fscache_object *object =
+		container_of(work, struct fscache_object, work);
+
+	seq_printf(m, "FSC: OBJ%x: %s",
+		   object->debug_id,
+		   fscache_object_states_short[object->state]);
 }
+#endif
 
 /*
  * initialise an object
@@ -376,7 +428,9 @@ static void fscache_initialise_object(struct fscache_object *object)
 		 * binding on to us, so we need to make sure we don't
 		 * add ourself to the list multiple times */
 		if (list_empty(&object->dep_link)) {
+			fscache_stat(&fscache_n_cop_grab_object);
 			object->cache->ops->grab_object(object);
+			fscache_stat_d(&fscache_n_cop_grab_object);
 			list_add(&object->dep_link,
 				 &parent->dependents);
 
@@ -414,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object)
 {
 	struct fscache_cookie *cookie = object->cookie;
 	struct fscache_object *parent;
+	int ret;
 
 	_enter("");
 
@@ -438,11 +493,20 @@ static void fscache_lookup_object(struct fscache_object *object)
 	       object->cache->tag->name);
 
 	fscache_stat(&fscache_n_object_lookups);
-	object->cache->ops->lookup_object(object);
+	fscache_stat(&fscache_n_cop_lookup_object);
+	ret = object->cache->ops->lookup_object(object);
+	fscache_stat_d(&fscache_n_cop_lookup_object);
 
 	if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
 		set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
+	if (ret == -ETIMEDOUT) {
+		/* probably stuck behind another object, so move this one to
+		 * the back of the queue */
+		fscache_stat(&fscache_n_object_lookups_timed_out);
+		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+	}
+
 	_leave("");
 }
 
@@ -546,7 +610,8 @@ static void fscache_object_available(struct fscache_object *object)
 
 	spin_lock(&object->lock);
 
-	if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
+	if (object->cookie &&
+	    test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
 		wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
 
 	fscache_done_parent_op(object);
@@ -562,7 +627,9 @@ static void fscache_object_available(struct fscache_object *object)
 	}
 	spin_unlock(&object->lock);
 
+	fscache_stat(&fscache_n_cop_lookup_complete);
 	object->cache->ops->lookup_complete(object);
+	fscache_stat_d(&fscache_n_cop_lookup_complete);
 	fscache_enqueue_dependents(object);
 
 	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
@@ -581,11 +648,16 @@ static void fscache_drop_object(struct fscache_object *object)
 
 	_enter("{OBJ%x,%d}", object->debug_id, object->n_children);
 
+	ASSERTCMP(object->cookie, ==, NULL);
+	ASSERT(hlist_unhashed(&object->cookie_link));
+
 	spin_lock(&cache->object_list_lock);
 	list_del_init(&object->cache_link);
 	spin_unlock(&cache->object_list_lock);
 
+	fscache_stat(&fscache_n_cop_drop_object);
 	cache->ops->drop_object(object);
+	fscache_stat_d(&fscache_n_cop_drop_object);
 
 	if (parent) {
 		_debug("release parent OBJ%x {%d}",
@@ -600,7 +672,9 @@ static void fscache_drop_object(struct fscache_object *object)
 	}
 
 	/* this just shifts the object release to the slow work processor */
+	fscache_stat(&fscache_n_cop_put_object);
 	object->cache->ops->put_object(object);
+	fscache_stat_d(&fscache_n_cop_put_object);
 
 	_leave("");
 }
@@ -690,8 +764,12 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
 {
 	struct fscache_object *object =
 		container_of(work, struct fscache_object, work);
+	int ret;
 
-	return object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+	fscache_stat(&fscache_n_cop_grab_object);
+	ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+	fscache_stat_d(&fscache_n_cop_grab_object);
+	return ret;
 }
 
 /*
@@ -702,7 +780,9 @@ static void fscache_object_slow_work_put_ref(struct slow_work *work)
 	struct fscache_object *object =
 		container_of(work, struct fscache_object, work);
 
-	return object->cache->ops->put_object(object);
+	fscache_stat(&fscache_n_cop_put_object);
+	object->cache->ops->put_object(object);
+	fscache_stat_d(&fscache_n_cop_put_object);
 }
 
 /*
@@ -739,7 +819,9 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
 
 		/* sort onto appropriate lists */
 		fscache_enqueue_object(dep);
+		fscache_stat(&fscache_n_cop_put_object);
 		dep->cache->ops->put_object(dep);
+		fscache_stat_d(&fscache_n_cop_put_object);
 
 		if (!list_empty(&object->dependents))
 			cond_resched_lock(&object->lock);
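The hunks above bracket every call into the cache backend with a
fscache_stat()/fscache_stat_d() pair so that /proc/fs/fscache/stats can show
how many of each cache operation are in flight. The real definitions live in
fs/fscache/internal.h, which is not part of this excerpt; a minimal sketch of
what they plausibly look like, under that stated assumption:

#ifdef CONFIG_FSCACHE_STATS
static inline atomic_t *__fscache_stat(atomic_t *stat)
{
	return stat;	/* pass the counter through to a callee */
}
#define fscache_stat(stat)   atomic_inc(stat)	/* entry to a cache op */
#define fscache_stat_d(stat) atomic_dec(stat)	/* exit from a cache op */
#else
#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat)   do {} while (0)
#define fscache_stat_d(stat) do {} while (0)
#endif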
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e7f8d53b8b6b..313e79a14266 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -13,6 +13,7 @@
 
 #define FSCACHE_DEBUG_LEVEL OPERATION
 #include <linux/module.h>
+#include <linux/seq_file.h>
 #include "internal.h"
 
 atomic_t fscache_op_debug_id;
@@ -31,32 +32,33 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 	_enter("{OBJ%x OP%x,%u}",
 	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
 
+	fscache_set_op_state(op, "EnQ");
+
+	ASSERT(list_empty(&op->pend_link));
 	ASSERT(op->processor != NULL);
 	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
-	if (list_empty(&op->pend_link)) {
-		switch (op->flags & FSCACHE_OP_TYPE) {
-		case FSCACHE_OP_FAST:
-			_debug("queue fast");
-			atomic_inc(&op->usage);
-			if (!schedule_work(&op->fast_work))
-				fscache_put_operation(op);
-			break;
-		case FSCACHE_OP_SLOW:
-			_debug("queue slow");
-			slow_work_enqueue(&op->slow_work);
-			break;
-		case FSCACHE_OP_MYTHREAD:
-			_debug("queue for caller's attention");
-			break;
-		default:
-			printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
-			       op->flags);
-			BUG();
-			break;
-		}
-		fscache_stat(&fscache_n_op_enqueue);
+	fscache_stat(&fscache_n_op_enqueue);
+	switch (op->flags & FSCACHE_OP_TYPE) {
+	case FSCACHE_OP_FAST:
+		_debug("queue fast");
+		atomic_inc(&op->usage);
+		if (!schedule_work(&op->fast_work))
+			fscache_put_operation(op);
+		break;
+	case FSCACHE_OP_SLOW:
+		_debug("queue slow");
+		slow_work_enqueue(&op->slow_work);
+		break;
+	case FSCACHE_OP_MYTHREAD:
+		_debug("queue for caller's attention");
+		break;
+	default:
+		printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
+		       op->flags);
+		BUG();
+		break;
 	}
 }
 EXPORT_SYMBOL(fscache_enqueue_operation);
@@ -67,6 +69,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
 static void fscache_run_op(struct fscache_object *object,
 			   struct fscache_operation *op)
 {
+	fscache_set_op_state(op, "Run");
+
 	object->n_in_progress++;
 	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
 		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -87,9 +91,12 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 
 	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
 
+	fscache_set_op_state(op, "SubmitX");
+
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
 	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+	ASSERT(list_empty(&op->pend_link));
 
 	ret = -ENOBUFS;
 	if (fscache_object_is_active(object)) {
@@ -190,9 +197,12 @@ int fscache_submit_op(struct fscache_object *object,
 
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
+	fscache_set_op_state(op, "Submit");
+
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
 	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+	ASSERT(list_empty(&op->pend_link));
 
 	ostate = object->state;
 	smp_rmb();
@@ -222,6 +232,11 @@ int fscache_submit_op(struct fscache_object *object,
 		list_add_tail(&op->pend_link, &object->pending_ops);
 		fscache_stat(&fscache_n_op_pend);
 		ret = 0;
+	} else if (object->state == FSCACHE_OBJECT_DYING ||
+		   object->state == FSCACHE_OBJECT_LC_DYING ||
+		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
+		fscache_stat(&fscache_n_op_rejected);
+		ret = -ENOBUFS;
 	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
 		fscache_report_unexpected_submission(object, op, ostate);
 		ASSERT(!fscache_object_is_active(object));
@@ -264,12 +279,7 @@ void fscache_start_operations(struct fscache_object *object)
 			stop = true;
 		}
 		list_del_init(&op->pend_link);
-		object->n_in_progress++;
-
-		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
-			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
-		if (op->processor)
-			fscache_enqueue_operation(op);
+		fscache_run_op(object, op);
 
 		/* the pending queue was holding a ref on the object */
 		fscache_put_operation(op);
@@ -282,6 +292,36 @@ void fscache_start_operations(struct fscache_object *object)
 }
 
 /*
+ * cancel an operation that's pending on an object
+ */
+int fscache_cancel_op(struct fscache_operation *op)
+{
+	struct fscache_object *object = op->object;
+	int ret;
+
+	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
+
+	spin_lock(&object->lock);
+
+	ret = -EBUSY;
+	if (!list_empty(&op->pend_link)) {
+		fscache_stat(&fscache_n_op_cancelled);
+		list_del_init(&op->pend_link);
+		object->n_ops--;
+		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+			object->n_exclusive--;
+		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+		fscache_put_operation(op);
+		ret = 0;
+	}
+
+	spin_unlock(&object->lock);
+	_leave(" = %d", ret);
+	return ret;
+}
+
+/*
  * release an operation
  * - queues pending ops if this is the last in-progress op
  */
@@ -298,6 +338,8 @@ void fscache_put_operation(struct fscache_operation *op)
 	if (!atomic_dec_and_test(&op->usage))
 		return;
 
+	fscache_set_op_state(op, "Put");
+
 	_debug("PUT OP");
 	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
 		BUG();
@@ -311,6 +353,9 @@ void fscache_put_operation(struct fscache_operation *op)
 
 	object = op->object;
 
+	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+		atomic_dec(&object->n_reads);
+
 	/* now... we may get called with the object spinlock held, so we
 	 * complete the cleanup here only if we can immediately acquire the
 	 * lock, and defer it otherwise */
@@ -452,8 +497,27 @@ static void fscache_op_execute(struct slow_work *work)
 	_leave("");
 }
 
+/*
+ * describe an operation for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
+{
+	struct fscache_operation *op =
+		container_of(work, struct fscache_operation, slow_work);
+
+	seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
+		   op->object->debug_id, op->debug_id,
+		   op->name, op->state, op->flags);
+}
+#endif
+
 const struct slow_work_ops fscache_op_slow_work_ops = {
+	.owner		= THIS_MODULE,
 	.get_ref	= fscache_op_get_ref,
 	.put_ref	= fscache_op_put_ref,
 	.execute	= fscache_op_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+	.desc		= fscache_op_desc,
+#endif
 };
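fscache_set_op_name() and fscache_set_op_state() annotate an operation for
the object-list and slow-work /proc views; their definitions are outside this
diff. A sketch of plausible helpers, on the assumption that struct
fscache_operation gains name/state string fields only under
CONFIG_FSCACHE_OBJECT_LIST:

#ifdef CONFIG_FSCACHE_OBJECT_LIST
#define fscache_set_op_name(OP, N)	do { (OP)->name  = (N); } while (0)
#define fscache_set_op_state(OP, S)	do { (OP)->state = (S); } while (0)
#else
#define fscache_set_op_name(OP, N)	do {} while (0)	/* compiled out */
#define fscache_set_op_state(OP, S)	do {} while (0)	/* compiled out */
#endif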
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 2568e0eb644f..c598ea4c4e7d 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -43,18 +43,102 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
 EXPORT_SYMBOL(__fscache_wait_on_page_write);
 
 /*
- * note that a page has finished being written to the cache
+ * decide whether a page can be released, possibly by cancelling a store to it
+ * - we're allowed to sleep if __GFP_WAIT is flagged
  */
-static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
+bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
+				  struct page *page,
+				  gfp_t gfp)
 {
 	struct page *xpage;
+	void *val;
+
+	_enter("%p,%p,%x", cookie, page, gfp);
+
+	rcu_read_lock();
+	val = radix_tree_lookup(&cookie->stores, page->index);
+	if (!val) {
+		rcu_read_unlock();
+		fscache_stat(&fscache_n_store_vmscan_not_storing);
+		__fscache_uncache_page(cookie, page);
+		return true;
+	}
+
+	/* see if the page is actually undergoing storage - if so we can't get
+	 * rid of it till the cache has finished with it */
+	if (radix_tree_tag_get(&cookie->stores, page->index,
+			       FSCACHE_COOKIE_STORING_TAG)) {
+		rcu_read_unlock();
+		goto page_busy;
+	}
+
+	/* the page is pending storage, so we attempt to cancel the store and
+	 * discard the store request so that the page can be reclaimed */
+	spin_lock(&cookie->stores_lock);
+	rcu_read_unlock();
+
+	if (radix_tree_tag_get(&cookie->stores, page->index,
+			       FSCACHE_COOKIE_STORING_TAG)) {
+		/* the page started to undergo storage whilst we were looking,
+		 * so now we can only wait or return */
+		spin_unlock(&cookie->stores_lock);
+		goto page_busy;
+	}
 
-	spin_lock(&cookie->lock);
 	xpage = radix_tree_delete(&cookie->stores, page->index);
-	spin_unlock(&cookie->lock);
-	ASSERT(xpage != NULL);
+	spin_unlock(&cookie->stores_lock);
+
+	if (xpage) {
+		fscache_stat(&fscache_n_store_vmscan_cancelled);
+		fscache_stat(&fscache_n_store_radix_deletes);
+		ASSERTCMP(xpage, ==, page);
+	} else {
+		fscache_stat(&fscache_n_store_vmscan_gone);
+	}
 
 	wake_up_bit(&cookie->flags, 0);
+	if (xpage)
+		page_cache_release(xpage);
+	__fscache_uncache_page(cookie, page);
+	return true;
+
+page_busy:
+	/* we might want to wait here, but that could deadlock the allocator as
+	 * the slow-work threads writing to the cache may all end up sleeping
+	 * on memory allocation */
+	fscache_stat(&fscache_n_store_vmscan_busy);
+	return false;
+}
+EXPORT_SYMBOL(__fscache_maybe_release_page);
+
+/*
+ * note that a page has finished being written to the cache
+ */
+static void fscache_end_page_write(struct fscache_object *object,
+				   struct page *page)
+{
+	struct fscache_cookie *cookie;
+	struct page *xpage = NULL;
+
+	spin_lock(&object->lock);
+	cookie = object->cookie;
+	if (cookie) {
+		/* delete the page from the tree if it is now no longer
+		 * pending */
+		spin_lock(&cookie->stores_lock);
+		radix_tree_tag_clear(&cookie->stores, page->index,
+				     FSCACHE_COOKIE_STORING_TAG);
+		if (!radix_tree_tag_get(&cookie->stores, page->index,
+					FSCACHE_COOKIE_PENDING_TAG)) {
+			fscache_stat(&fscache_n_store_radix_deletes);
+			xpage = radix_tree_delete(&cookie->stores, page->index);
+		}
+		spin_unlock(&cookie->stores_lock);
+		wake_up_bit(&cookie->flags, 0);
+	}
+	spin_unlock(&object->lock);
+	if (xpage)
+		page_cache_release(xpage);
 }
 
 /*
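For context, __fscache_maybe_release_page() is meant to be reached from a
netfs's ->releasepage() when vmscan tries to reclaim a page still tagged in
the cookie's stores tree. A hedged sketch of such a hook; the
fscache_maybe_release_page() wrapper name and the EXAMPLE_COOKIE() accessor
are assumptions for illustration, not part of this diff:

static int example_releasepage(struct page *page, gfp_t gfp)
{
	/* EXAMPLE_COOKIE() stands in for however the netfs maps an inode
	 * to its fscache cookie */
	struct fscache_cookie *cookie = EXAMPLE_COOKIE(page->mapping->host);

	if (PageFsCache(page))
		/* false: the page is busy being stored, so refuse release;
		 * true: the store was cancelled or already gone and the
		 * page has been uncached */
		return fscache_maybe_release_page(cookie, page, gfp);
	return 1;
}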
@@ -63,14 +147,21 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p
 static void fscache_attr_changed_op(struct fscache_operation *op)
 {
 	struct fscache_object *object = op->object;
+	int ret;
 
 	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
 
 	fscache_stat(&fscache_n_attr_changed_calls);
 
-	if (fscache_object_is_active(object) &&
-	    object->cache->ops->attr_changed(object) < 0)
-		fscache_abort_object(object);
+	if (fscache_object_is_active(object)) {
+		fscache_set_op_state(op, "CallFS");
+		fscache_stat(&fscache_n_cop_attr_changed);
+		ret = object->cache->ops->attr_changed(object);
+		fscache_stat_d(&fscache_n_cop_attr_changed);
+		fscache_set_op_state(op, "Done");
+		if (ret < 0)
+			fscache_abort_object(object);
+	}
 
 	_leave("");
 }
@@ -99,6 +190,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 	fscache_operation_init(op, NULL);
 	fscache_operation_init_slow(op, fscache_attr_changed_op);
 	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+	fscache_set_op_name(op, "Attr");
 
 	spin_lock(&cookie->lock);
 
@@ -184,6 +276,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
 	op->start_time = jiffies;
 	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
 	INIT_LIST_HEAD(&op->to_do);
+	fscache_set_op_name(&op->op, "Retr");
 	return op;
 }
 
@@ -221,6 +314,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
 }
 
 /*
+ * wait for an object to become active (or dead)
+ */
+static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+						 struct fscache_retrieval *op,
+						 atomic_t *stat_op_waits,
+						 atomic_t *stat_object_dead)
+{
+	int ret;
+
+	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
+		goto check_if_dead;
+
+	_debug(">>> WT");
+	fscache_stat(stat_op_waits);
+	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+			fscache_wait_bit_interruptible,
+			TASK_INTERRUPTIBLE) < 0) {
+		ret = fscache_cancel_op(&op->op);
+		if (ret == 0)
+			return -ERESTARTSYS;
+
+		/* it's been removed from the pending queue by another party,
+		 * so we should get to run shortly */
+		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+	}
+	_debug("<<< GO");
+
+check_if_dead:
+	if (unlikely(fscache_object_is_dead(object))) {
+		fscache_stat(stat_object_dead);
+		return -ENOBUFS;
+	}
+	return 0;
+}
+
+/*
  * read a page from the cache or allocate a block in which to store it
  * - we return:
  *   -ENOMEM	- out of memory, nothing done
@@ -257,6 +387,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
+	fscache_set_op_name(&op->op, "RetrRA1");
 
 	spin_lock(&cookie->lock);
 
@@ -267,6 +398,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
 	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
 
+	atomic_inc(&object->n_reads);
+	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
 	if (fscache_submit_op(object, &op->op) < 0)
 		goto nobufs_unlock;
 	spin_unlock(&cookie->lock);
@@ -279,23 +413,27 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
-	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-		_debug(">>> WT");
-		fscache_stat(&fscache_n_retrieval_op_waits);
-		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-		_debug("<<< GO");
-	}
+	ret = fscache_wait_for_retrieval_activation(
+		object, op,
+		__fscache_stat(&fscache_n_retrieval_op_waits),
+		__fscache_stat(&fscache_n_retrievals_object_dead));
+	if (ret < 0)
+		goto error;
 
 	/* ask the cache to honour the operation */
 	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+		fscache_stat(&fscache_n_cop_allocate_page);
 		ret = object->cache->ops->allocate_page(op, page, gfp);
+		fscache_stat_d(&fscache_n_cop_allocate_page);
 		if (ret == 0)
 			ret = -ENODATA;
 	} else {
+		fscache_stat(&fscache_n_cop_read_or_alloc_page);
 		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
+		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
 	}
 
+error:
 	if (ret == -ENOMEM)
 		fscache_stat(&fscache_n_retrievals_nomem);
 	else if (ret == -ERESTARTSYS)
@@ -347,7 +485,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 			   void *context,
 			   gfp_t gfp)
 {
-	fscache_pages_retrieval_func_t func;
 	struct fscache_retrieval *op;
 	struct fscache_object *object;
 	int ret;
@@ -369,6 +506,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 	op = fscache_alloc_retrieval(mapping, end_io_func, context);
 	if (!op)
 		return -ENOMEM;
+	fscache_set_op_name(&op->op, "RetrRAN");
 
 	spin_lock(&cookie->lock);
 
@@ -377,6 +515,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 	object = hlist_entry(cookie->backing_objects.first,
 			     struct fscache_object, cookie_link);
 
+	atomic_inc(&object->n_reads);
+	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
 	if (fscache_submit_op(object, &op->op) < 0)
 		goto nobufs_unlock;
 	spin_unlock(&cookie->lock);
@@ -389,21 +530,27 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
-	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-		_debug(">>> WT");
-		fscache_stat(&fscache_n_retrieval_op_waits);
-		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-		_debug("<<< GO");
-	}
+	ret = fscache_wait_for_retrieval_activation(
+		object, op,
+		__fscache_stat(&fscache_n_retrieval_op_waits),
+		__fscache_stat(&fscache_n_retrievals_object_dead));
+	if (ret < 0)
+		goto error;
 
 	/* ask the cache to honour the operation */
-	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags))
-		func = object->cache->ops->allocate_pages;
-	else
-		func = object->cache->ops->read_or_alloc_pages;
-	ret = func(op, pages, nr_pages, gfp);
+	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+		fscache_stat(&fscache_n_cop_allocate_pages);
+		ret = object->cache->ops->allocate_pages(
+			op, pages, nr_pages, gfp);
+		fscache_stat_d(&fscache_n_cop_allocate_pages);
+	} else {
+		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
+		ret = object->cache->ops->read_or_alloc_pages(
+			op, pages, nr_pages, gfp);
+		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
+	}
 
+error:
 	if (ret == -ENOMEM)
 		fscache_stat(&fscache_n_retrievals_nomem);
 	else if (ret == -ERESTARTSYS)
@@ -461,6 +608,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
 	if (!op)
 		return -ENOMEM;
+	fscache_set_op_name(&op->op, "RetrAL1");
 
 	spin_lock(&cookie->lock);
 
@@ -475,18 +623,22 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 
 	fscache_stat(&fscache_n_alloc_ops);
 
-	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-		_debug(">>> WT");
-		fscache_stat(&fscache_n_alloc_op_waits);
-		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-		_debug("<<< GO");
-	}
+	ret = fscache_wait_for_retrieval_activation(
+		object, op,
+		__fscache_stat(&fscache_n_alloc_op_waits),
+		__fscache_stat(&fscache_n_allocs_object_dead));
+	if (ret < 0)
+		goto error;
 
 	/* ask the cache to honour the operation */
+	fscache_stat(&fscache_n_cop_allocate_page);
 	ret = object->cache->ops->allocate_page(op, page, gfp);
+	fscache_stat_d(&fscache_n_cop_allocate_page);
 
-	if (ret < 0)
+error:
+	if (ret == -ERESTARTSYS)
+		fscache_stat(&fscache_n_allocs_intr);
+	else if (ret < 0)
 		fscache_stat(&fscache_n_allocs_nobufs);
 	else
 		fscache_stat(&fscache_n_allocs_ok);
@@ -521,7 +673,7 @@ static void fscache_write_op(struct fscache_operation *_op)
 	struct fscache_storage *op =
 		container_of(_op, struct fscache_storage, op);
 	struct fscache_object *object = op->op.object;
-	struct fscache_cookie *cookie = object->cookie;
+	struct fscache_cookie *cookie;
 	struct page *page;
 	unsigned n;
 	void *results[1];
@@ -529,16 +681,19 @@ static void fscache_write_op(struct fscache_operation *_op)
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
 
-	spin_lock(&cookie->lock);
+	fscache_set_op_state(&op->op, "GetPage");
+
 	spin_lock(&object->lock);
+	cookie = object->cookie;
 
-	if (!fscache_object_is_active(object)) {
+	if (!fscache_object_is_active(object) || !cookie) {
 		spin_unlock(&object->lock);
-		spin_unlock(&cookie->lock);
 		_leave("");
 		return;
 	}
 
+	spin_lock(&cookie->stores_lock);
+
 	fscache_stat(&fscache_n_store_calls);
 
 	/* find a page to store */
@@ -549,23 +704,35 @@ static void fscache_write_op(struct fscache_operation *_op)
 		goto superseded;
 	page = results[0];
 	_debug("gang %d [%lx]", n, page->index);
-	if (page->index > op->store_limit)
+	if (page->index > op->store_limit) {
+		fscache_stat(&fscache_n_store_pages_over_limit);
 		goto superseded;
+	}
 
-	radix_tree_tag_clear(&cookie->stores, page->index,
-			     FSCACHE_COOKIE_PENDING_TAG);
+	if (page) {
+		radix_tree_tag_set(&cookie->stores, page->index,
+				   FSCACHE_COOKIE_STORING_TAG);
+		radix_tree_tag_clear(&cookie->stores, page->index,
+				     FSCACHE_COOKIE_PENDING_TAG);
+	}
 
+	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
-	spin_unlock(&cookie->lock);
 
 	if (page) {
+		fscache_set_op_state(&op->op, "Store");
+		fscache_stat(&fscache_n_store_pages);
+		fscache_stat(&fscache_n_cop_write_page);
 		ret = object->cache->ops->write_page(op, page);
-		fscache_end_page_write(cookie, page);
-		page_cache_release(page);
-		if (ret < 0)
+		fscache_stat_d(&fscache_n_cop_write_page);
+		fscache_set_op_state(&op->op, "EndWrite");
+		fscache_end_page_write(object, page);
+		if (ret < 0) {
+			fscache_set_op_state(&op->op, "Abort");
 			fscache_abort_object(object);
-		else
+		} else {
 			fscache_enqueue_operation(&op->op);
+		}
 	}
 
 	_leave("");
@@ -575,9 +742,9 @@ superseded:
 	/* this writer is going away and there aren't any more things to
 	 * write */
 	_debug("cease");
+	spin_unlock(&cookie->stores_lock);
 	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 	spin_unlock(&object->lock);
-	spin_unlock(&cookie->lock);
 	_leave("");
 }
 
@@ -634,6 +801,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	fscache_operation_init(&op->op, fscache_release_write_op);
 	fscache_operation_init_slow(&op->op, fscache_write_op);
 	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+	fscache_set_op_name(&op->op, "Write1");
 
 	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
 	if (ret < 0)
@@ -652,6 +820,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	/* add the page to the pending-storage radix tree on the backing
 	 * object */
 	spin_lock(&object->lock);
+	spin_lock(&cookie->stores_lock);
 
 	_debug("store limit %llx", (unsigned long long) object->store_limit);
 
@@ -672,6 +841,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
 		goto already_pending;
 
+	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
 	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
@@ -693,6 +863,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
 	fscache_stat(&fscache_n_stores_again);
 already_pending:
+	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
 	radix_tree_preload_end();
@@ -702,7 +873,9 @@ already_pending:
 	return 0;
 
 submit_failed:
+	spin_lock(&cookie->stores_lock);
 	radix_tree_delete(&cookie->stores, page->index);
+	spin_unlock(&cookie->stores_lock);
 	page_cache_release(page);
 	ret = -ENOBUFS;
 	goto nobufs;
@@ -763,7 +936,9 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
 	if (TestClearPageFsCache(page) &&
 	    object->cache->ops->uncache_page) {
 		/* the cache backend releases the cookie lock */
+		fscache_stat(&fscache_n_cop_uncache_page);
 		object->cache->ops->uncache_page(object, page);
+		fscache_stat_d(&fscache_n_cop_uncache_page);
 		goto done;
 	}
 
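Taken together, the fs/fscache/page.c hunks split page-store tracking into
two radix-tree tags: a page queued for writing is tagged
FSCACHE_COOKIE_PENDING_TAG, fscache_write_op() retags it
FSCACHE_COOKIE_STORING_TAG under stores_lock before calling write_page(), and
fscache_end_page_write() clears the tag and drops the tree's page reference.
Reclaim via __fscache_maybe_release_page() can therefore cancel a store that
is merely pending, but must back off once a page has progressed to storing.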
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index beeab44bc31a..1d9e4951a597 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -37,10 +37,20 @@ int __init fscache_proc_init(void)
 		goto error_histogram;
 #endif
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+	if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
+			 &fscache_objlist_fops))
+		goto error_objects;
+#endif
+
 	_leave(" = 0");
 	return 0;
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+error_objects:
+#endif
 #ifdef CONFIG_FSCACHE_HISTOGRAM
+	remove_proc_entry("fs/fscache/histogram", NULL);
 error_histogram:
 #endif
 #ifdef CONFIG_FSCACHE_STATS
@@ -58,6 +68,9 @@ error_dir:
  */
 void fscache_proc_cleanup(void)
 {
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+	remove_proc_entry("fs/fscache/objects", NULL);
+#endif
 #ifdef CONFIG_FSCACHE_HISTOGRAM
 	remove_proc_entry("fs/fscache/histogram", NULL);
 #endif
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 65deb99e756b..46435f3aae68 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -25,6 +25,8 @@ atomic_t fscache_n_op_requeue;
 atomic_t fscache_n_op_deferred_release;
 atomic_t fscache_n_op_release;
 atomic_t fscache_n_op_gc;
+atomic_t fscache_n_op_cancelled;
+atomic_t fscache_n_op_rejected;
 
 atomic_t fscache_n_attr_changed;
 atomic_t fscache_n_attr_changed_ok;
@@ -36,6 +38,8 @@ atomic_t fscache_n_allocs;
 atomic_t fscache_n_allocs_ok;
 atomic_t fscache_n_allocs_wait;
 atomic_t fscache_n_allocs_nobufs;
+atomic_t fscache_n_allocs_intr;
+atomic_t fscache_n_allocs_object_dead;
 atomic_t fscache_n_alloc_ops;
 atomic_t fscache_n_alloc_op_waits;
 
@@ -46,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata;
 atomic_t fscache_n_retrievals_nobufs;
 atomic_t fscache_n_retrievals_intr;
 atomic_t fscache_n_retrievals_nomem;
+atomic_t fscache_n_retrievals_object_dead;
 atomic_t fscache_n_retrieval_ops;
 atomic_t fscache_n_retrieval_op_waits;
 
@@ -56,6 +61,14 @@ atomic_t fscache_n_stores_nobufs;
 atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
+atomic_t fscache_n_store_pages;
+atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_pages_over_limit;
+
+atomic_t fscache_n_store_vmscan_not_storing;
+atomic_t fscache_n_store_vmscan_gone;
+atomic_t fscache_n_store_vmscan_busy;
+atomic_t fscache_n_store_vmscan_cancelled;
 
 atomic_t fscache_n_marks;
 atomic_t fscache_n_uncaches;
@@ -74,6 +87,7 @@ atomic_t fscache_n_updates_run;
 atomic_t fscache_n_relinquishes;
 atomic_t fscache_n_relinquishes_null;
 atomic_t fscache_n_relinquishes_waitcrt;
+atomic_t fscache_n_relinquishes_retire;
 
 atomic_t fscache_n_cookie_index;
 atomic_t fscache_n_cookie_data;
@@ -84,6 +98,7 @@ atomic_t fscache_n_object_no_alloc;
 atomic_t fscache_n_object_lookups;
 atomic_t fscache_n_object_lookups_negative;
 atomic_t fscache_n_object_lookups_positive;
+atomic_t fscache_n_object_lookups_timed_out;
 atomic_t fscache_n_object_created;
 atomic_t fscache_n_object_avail;
 atomic_t fscache_n_object_dead;
@@ -93,6 +108,23 @@ atomic_t fscache_n_checkaux_okay;
 atomic_t fscache_n_checkaux_update;
 atomic_t fscache_n_checkaux_obsolete;
 
+atomic_t fscache_n_cop_alloc_object;
+atomic_t fscache_n_cop_lookup_object;
+atomic_t fscache_n_cop_lookup_complete;
+atomic_t fscache_n_cop_grab_object;
+atomic_t fscache_n_cop_update_object;
+atomic_t fscache_n_cop_drop_object;
+atomic_t fscache_n_cop_put_object;
+atomic_t fscache_n_cop_sync_cache;
+atomic_t fscache_n_cop_attr_changed;
+atomic_t fscache_n_cop_read_or_alloc_page;
+atomic_t fscache_n_cop_read_or_alloc_pages;
+atomic_t fscache_n_cop_allocate_page;
+atomic_t fscache_n_cop_allocate_pages;
+atomic_t fscache_n_cop_write_page;
+atomic_t fscache_n_cop_uncache_page;
+atomic_t fscache_n_cop_dissociate_pages;
+
 /*
  * display the general statistics
  */
@@ -129,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_acquires_nobufs),
 		   atomic_read(&fscache_n_acquires_oom));
 
-	seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n",
+	seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
 		   atomic_read(&fscache_n_object_lookups),
 		   atomic_read(&fscache_n_object_lookups_negative),
 		   atomic_read(&fscache_n_object_lookups_positive),
+		   atomic_read(&fscache_n_object_lookups_timed_out),
 		   atomic_read(&fscache_n_object_created));
 
 	seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
@@ -140,10 +173,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_updates_null),
 		   atomic_read(&fscache_n_updates_run));
 
-	seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n",
+	seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
 		   atomic_read(&fscache_n_relinquishes),
 		   atomic_read(&fscache_n_relinquishes_null),
-		   atomic_read(&fscache_n_relinquishes_waitcrt));
+		   atomic_read(&fscache_n_relinquishes_waitcrt),
+		   atomic_read(&fscache_n_relinquishes_retire));
 
 	seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
 		   atomic_read(&fscache_n_attr_changed),
@@ -152,14 +186,16 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_attr_changed_nomem),
 		   atomic_read(&fscache_n_attr_changed_calls));
 
-	seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n",
+	seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
 		   atomic_read(&fscache_n_allocs),
 		   atomic_read(&fscache_n_allocs_ok),
 		   atomic_read(&fscache_n_allocs_wait),
-		   atomic_read(&fscache_n_allocs_nobufs));
-	seq_printf(m, "Allocs : ops=%u owt=%u\n",
+		   atomic_read(&fscache_n_allocs_nobufs),
+		   atomic_read(&fscache_n_allocs_intr));
+	seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
 		   atomic_read(&fscache_n_alloc_ops),
-		   atomic_read(&fscache_n_alloc_op_waits));
+		   atomic_read(&fscache_n_alloc_op_waits),
+		   atomic_read(&fscache_n_allocs_object_dead));
 
 	seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
 		   " int=%u oom=%u\n",
@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_retrievals_nobufs),
 		   atomic_read(&fscache_n_retrievals_intr),
 		   atomic_read(&fscache_n_retrievals_nomem));
-	seq_printf(m, "Retrvls: ops=%u owt=%u\n",
+	seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
 		   atomic_read(&fscache_n_retrieval_ops),
-		   atomic_read(&fscache_n_retrieval_op_waits));
+		   atomic_read(&fscache_n_retrieval_op_waits),
+		   atomic_read(&fscache_n_retrievals_object_dead));
 
 	seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
 		   atomic_read(&fscache_n_stores),
@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_stores_again),
 		   atomic_read(&fscache_n_stores_nobufs),
 		   atomic_read(&fscache_n_stores_oom));
-	seq_printf(m, "Stores : ops=%u run=%u\n",
+	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
 		   atomic_read(&fscache_n_store_ops),
-		   atomic_read(&fscache_n_store_calls));
+		   atomic_read(&fscache_n_store_calls),
+		   atomic_read(&fscache_n_store_pages),
+		   atomic_read(&fscache_n_store_radix_deletes),
+		   atomic_read(&fscache_n_store_pages_over_limit));
 
-	seq_printf(m, "Ops    : pend=%u run=%u enq=%u\n",
+	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+		   atomic_read(&fscache_n_store_vmscan_not_storing),
+		   atomic_read(&fscache_n_store_vmscan_gone),
+		   atomic_read(&fscache_n_store_vmscan_busy),
+		   atomic_read(&fscache_n_store_vmscan_cancelled));
+
+	seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
 		   atomic_read(&fscache_n_op_pend),
 		   atomic_read(&fscache_n_op_run),
-		   atomic_read(&fscache_n_op_enqueue));
+		   atomic_read(&fscache_n_op_enqueue),
+		   atomic_read(&fscache_n_op_cancelled),
+		   atomic_read(&fscache_n_op_rejected));
 	seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
 		   atomic_read(&fscache_n_op_deferred_release),
 		   atomic_read(&fscache_n_op_release),
 		   atomic_read(&fscache_n_op_gc));
+
+	seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+		   atomic_read(&fscache_n_cop_alloc_object),
+		   atomic_read(&fscache_n_cop_lookup_object),
+		   atomic_read(&fscache_n_cop_lookup_complete),
+		   atomic_read(&fscache_n_cop_grab_object));
+	seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+		   atomic_read(&fscache_n_cop_update_object),
+		   atomic_read(&fscache_n_cop_drop_object),
+		   atomic_read(&fscache_n_cop_put_object),
+		   atomic_read(&fscache_n_cop_attr_changed),
+		   atomic_read(&fscache_n_cop_sync_cache));
+	seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
+		   atomic_read(&fscache_n_cop_read_or_alloc_page),
+		   atomic_read(&fscache_n_cop_read_or_alloc_pages),
+		   atomic_read(&fscache_n_cop_allocate_page),
+		   atomic_read(&fscache_n_cop_allocate_pages),
260 atomic_read(&fscache_n_cop_write_page),
261 atomic_read(&fscache_n_cop_uncache_page),
262 atomic_read(&fscache_n_cop_dissociate_pages));
195 return 0; 263 return 0;
196} 264}
197 265
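
The stats code above follows one idiom throughout: a flat bank of module-global atomic_t counters, incremented lock-free at each call site and rendered as name=value pairs, one seq_printf() per counter class. A minimal sketch of that idiom, assuming hypothetical names (my_n_foo, my_stats_show are illustrative, not from this patch):

	/* Sketch of the counter/seq_file idiom used by fs/fscache/stats.c.
	 * All names here are illustrative. */
	#include <linux/seq_file.h>
	#include <asm/atomic.h>

	static atomic_t my_n_foo;		/* bumped on the hot path */
	static atomic_t my_n_foo_ok;

	static inline void my_count_foo(int ok)
	{
		atomic_inc(&my_n_foo);		/* lock-free, any context */
		if (ok)
			atomic_inc(&my_n_foo_ok);
	}

	static int my_stats_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "Foo    : n=%u ok=%u\n",
			   atomic_read(&my_n_foo),
			   atomic_read(&my_n_foo_ok));
		return 0;
	}

Reads are racy across counters by design; the file is a monitoring aid, not a consistent snapshot.
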
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8ada78aade58..4787ae6c5c1c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -385,6 +385,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
385 if (fc->no_create) 385 if (fc->no_create)
386 return -ENOSYS; 386 return -ENOSYS;
387 387
388 if (flags & O_DIRECT)
389 return -EINVAL;
390
388 forget_req = fuse_get_req(fc); 391 forget_req = fuse_get_req(fc);
389 if (IS_ERR(forget_req)) 392 if (IS_ERR(forget_req))
390 return PTR_ERR(forget_req); 393 return PTR_ERR(forget_req);
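
The fuse hunk makes the atomic create-and-open path reject O_DIRECT up front, so userspace gets EINVAL at open() time instead of a less obvious failure later. A hedged userspace illustration (the mount path is a placeholder):

	/* Userspace view of the change: O_CREAT|O_DIRECT on a FUSE mount
	 * now fails cleanly at open().  /mnt/fuse is a placeholder path. */
	#define _GNU_SOURCE		/* for O_DIRECT */
	#include <fcntl.h>
	#include <errno.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = open("/mnt/fuse/newfile",
			      O_CREAT | O_RDWR | O_DIRECT, 0644);
		if (fd < 0 && errno == EINVAL)
			printf("O_DIRECT create rejected by fuse_create_open()\n");
		return 0;
	}
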
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 5971359d2090..4dcddf83326f 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -8,6 +8,8 @@ config GFS2_FS
8 select FS_POSIX_ACL 8 select FS_POSIX_ACL
9 select CRC32 9 select CRC32
10 select SLOW_WORK 10 select SLOW_WORK
11 select QUOTA
12 select QUOTACTL
11 help 13 help
12 A cluster filesystem. 14 A cluster filesystem.
13 15
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3fc4e3ac7d84..3eb1ea846173 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/completion.h> 13#include <linux/completion.h>
14#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
15#include <linux/xattr.h>
15#include <linux/posix_acl.h> 16#include <linux/posix_acl.h>
16#include <linux/posix_acl_xattr.h> 17#include <linux/posix_acl_xattr.h>
17#include <linux/gfs2_ondisk.h> 18#include <linux/gfs2_ondisk.h>
@@ -26,108 +27,44 @@
26#include "trans.h" 27#include "trans.h"
27#include "util.h" 28#include "util.h"
28 29
29#define ACL_ACCESS 1 30static const char *gfs2_acl_name(int type)
30#define ACL_DEFAULT 0
31
32int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
33 struct gfs2_ea_request *er, int *remove, mode_t *mode)
34{ 31{
35 struct posix_acl *acl; 32 switch (type) {
36 int error; 33 case ACL_TYPE_ACCESS:
37 34 return GFS2_POSIX_ACL_ACCESS;
38 error = gfs2_acl_validate_remove(ip, access); 35 case ACL_TYPE_DEFAULT:
39 if (error) 36 return GFS2_POSIX_ACL_DEFAULT;
40 return error;
41
42 if (!er->er_data)
43 return -EINVAL;
44
45 acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
46 if (IS_ERR(acl))
47 return PTR_ERR(acl);
48 if (!acl) {
49 *remove = 1;
50 return 0;
51 }
52
53 error = posix_acl_valid(acl);
54 if (error)
55 goto out;
56
57 if (access) {
58 error = posix_acl_equiv_mode(acl, mode);
59 if (!error)
60 *remove = 1;
61 else if (error > 0)
62 error = 0;
63 } 37 }
64 38 return NULL;
65out:
66 posix_acl_release(acl);
67 return error;
68}
69
70int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
71{
72 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
73 return -EOPNOTSUPP;
74 if (!is_owner_or_cap(&ip->i_inode))
75 return -EPERM;
76 if (S_ISLNK(ip->i_inode.i_mode))
77 return -EOPNOTSUPP;
78 if (!access && !S_ISDIR(ip->i_inode.i_mode))
79 return -EACCES;
80
81 return 0;
82} 39}
83 40
84static int acl_get(struct gfs2_inode *ip, const char *name, 41static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
85 struct posix_acl **acl, struct gfs2_ea_location *el,
86 char **datap, unsigned int *lenp)
87{ 42{
43 struct posix_acl *acl;
44 const char *name;
88 char *data; 45 char *data;
89 unsigned int len; 46 int len;
90 int error;
91
92 el->el_bh = NULL;
93 47
94 if (!ip->i_eattr) 48 if (!ip->i_eattr)
95 return 0; 49 return NULL;
96
97 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, el);
98 if (error)
99 return error;
100 if (!el->el_ea)
101 return 0;
102 if (!GFS2_EA_DATA_LEN(el->el_ea))
103 goto out;
104 50
105 len = GFS2_EA_DATA_LEN(el->el_ea); 51 acl = get_cached_acl(&ip->i_inode, type);
106 data = kmalloc(len, GFP_NOFS); 52 if (acl != ACL_NOT_CACHED)
107 error = -ENOMEM; 53 return acl;
108 if (!data)
109 goto out;
110 54
111 error = gfs2_ea_get_copy(ip, el, data, len); 55 name = gfs2_acl_name(type);
112 if (error < 0) 56 if (name == NULL)
113 goto out_kfree; 57 return ERR_PTR(-EINVAL);
114 error = 0;
115 58
116 if (acl) { 59 len = gfs2_xattr_acl_get(ip, name, &data);
117 *acl = posix_acl_from_xattr(data, len); 60 if (len < 0)
118 if (IS_ERR(*acl)) 61 return ERR_PTR(len);
119 error = PTR_ERR(*acl); 62 if (len == 0)
120 } 63 return NULL;
121 64
122out_kfree: 65 acl = posix_acl_from_xattr(data, len);
123 if (error || !datap) { 66 kfree(data);
124 kfree(data); 67 return acl;
125 } else {
126 *datap = data;
127 *lenp = len;
128 }
129out:
130 return error;
131} 68}
132 69
133/** 70/**
@@ -140,14 +77,12 @@ out:
140 77
141int gfs2_check_acl(struct inode *inode, int mask) 78int gfs2_check_acl(struct inode *inode, int mask)
142{ 79{
143 struct gfs2_ea_location el; 80 struct posix_acl *acl;
144 struct posix_acl *acl = NULL;
145 int error; 81 int error;
146 82
147 error = acl_get(GFS2_I(inode), GFS2_POSIX_ACL_ACCESS, &acl, &el, NULL, NULL); 83 acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
148 brelse(el.el_bh); 84 if (IS_ERR(acl))
149 if (error) 85 return PTR_ERR(acl);
150 return error;
151 86
152 if (acl) { 87 if (acl) {
153 error = posix_acl_permission(inode, acl, mask); 88 error = posix_acl_permission(inode, acl, mask);
@@ -158,57 +93,75 @@ int gfs2_check_acl(struct inode *inode, int mask)
158 return -EAGAIN; 93 return -EAGAIN;
159} 94}
160 95
161static int munge_mode(struct gfs2_inode *ip, mode_t mode) 96static int gfs2_set_mode(struct inode *inode, mode_t mode)
162{ 97{
163 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 98 int error = 0;
164 struct buffer_head *dibh;
165 int error;
166 99
167 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 100 if (mode != inode->i_mode) {
168 if (error) 101 struct iattr iattr;
169 return error;
170 102
171 error = gfs2_meta_inode_buffer(ip, &dibh); 103 iattr.ia_valid = ATTR_MODE;
172 if (!error) { 104 iattr.ia_mode = mode;
173 gfs2_assert_withdraw(sdp, 105
174 (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT)); 106 error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
175 ip->i_inode.i_mode = mode;
176 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
177 gfs2_dinode_out(ip, dibh->b_data);
178 brelse(dibh);
179 } 107 }
180 108
181 gfs2_trans_end(sdp); 109 return error;
110}
111
112static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
113{
114 int error;
115 int len;
116 char *data;
117 const char *name = gfs2_acl_name(type);
182 118
183 return 0; 119 BUG_ON(name == NULL);
120 len = posix_acl_to_xattr(acl, NULL, 0);
121 if (len == 0)
122 return 0;
123 data = kmalloc(len, GFP_NOFS);
124 if (data == NULL)
125 return -ENOMEM;
126 error = posix_acl_to_xattr(acl, data, len);
127 if (error < 0)
128 goto out;
129 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
130 if (!error)
131 set_cached_acl(inode, type, acl);
132out:
133 kfree(data);
134 return error;
184} 135}
185 136
186int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) 137int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
187{ 138{
188 struct gfs2_ea_location el;
189 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 139 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
190 struct posix_acl *acl = NULL, *clone; 140 struct posix_acl *acl, *clone;
191 mode_t mode = ip->i_inode.i_mode; 141 mode_t mode = inode->i_mode;
192 char *data = NULL; 142 int error = 0;
193 unsigned int len;
194 int error;
195 143
196 if (!sdp->sd_args.ar_posix_acl) 144 if (!sdp->sd_args.ar_posix_acl)
197 return 0; 145 return 0;
198 if (S_ISLNK(ip->i_inode.i_mode)) 146 if (S_ISLNK(inode->i_mode))
199 return 0; 147 return 0;
200 148
201 error = acl_get(dip, GFS2_POSIX_ACL_DEFAULT, &acl, &el, &data, &len); 149 acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
202 brelse(el.el_bh); 150 if (IS_ERR(acl))
203 if (error) 151 return PTR_ERR(acl);
204 return error;
205 if (!acl) { 152 if (!acl) {
206 mode &= ~current_umask(); 153 mode &= ~current_umask();
207 if (mode != ip->i_inode.i_mode) 154 if (mode != inode->i_mode)
208 error = munge_mode(ip, mode); 155 error = gfs2_set_mode(inode, mode);
209 return error; 156 return error;
210 } 157 }
211 158
159 if (S_ISDIR(inode->i_mode)) {
160 error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl);
161 if (error)
162 goto out;
163 }
164
212 clone = posix_acl_clone(acl, GFP_NOFS); 165 clone = posix_acl_clone(acl, GFP_NOFS);
213 error = -ENOMEM; 166 error = -ENOMEM;
214 if (!clone) 167 if (!clone)
@@ -216,43 +169,32 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
216 posix_acl_release(acl); 169 posix_acl_release(acl);
217 acl = clone; 170 acl = clone;
218 171
219 if (S_ISDIR(ip->i_inode.i_mode)) {
220 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
221 GFS2_POSIX_ACL_DEFAULT, data, len, 0);
222 if (error)
223 goto out;
224 }
225
226 error = posix_acl_create_masq(acl, &mode); 172 error = posix_acl_create_masq(acl, &mode);
227 if (error < 0) 173 if (error < 0)
228 goto out; 174 goto out;
229 if (error == 0) 175 if (error == 0)
230 goto munge; 176 goto munge;
231 177
232 posix_acl_to_xattr(acl, data, len); 178 error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl);
233 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
234 GFS2_POSIX_ACL_ACCESS, data, len, 0);
235 if (error) 179 if (error)
236 goto out; 180 goto out;
237munge: 181munge:
238 error = munge_mode(ip, mode); 182 error = gfs2_set_mode(inode, mode);
239out: 183out:
240 posix_acl_release(acl); 184 posix_acl_release(acl);
241 kfree(data);
242 return error; 185 return error;
243} 186}
244 187
245int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) 188int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
246{ 189{
247 struct posix_acl *acl = NULL, *clone; 190 struct posix_acl *acl, *clone;
248 struct gfs2_ea_location el;
249 char *data; 191 char *data;
250 unsigned int len; 192 unsigned int len;
251 int error; 193 int error;
252 194
253 error = acl_get(ip, GFS2_POSIX_ACL_ACCESS, &acl, &el, &data, &len); 195 acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
254 if (error) 196 if (IS_ERR(acl))
255 goto out_brelse; 197 return PTR_ERR(acl);
256 if (!acl) 198 if (!acl)
257 return gfs2_setattr_simple(ip, attr); 199 return gfs2_setattr_simple(ip, attr);
258 200
@@ -265,15 +207,134 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
265 207
266 error = posix_acl_chmod_masq(acl, attr->ia_mode); 208 error = posix_acl_chmod_masq(acl, attr->ia_mode);
267 if (!error) { 209 if (!error) {
210 len = posix_acl_to_xattr(acl, NULL, 0);
211 data = kmalloc(len, GFP_NOFS);
212 error = -ENOMEM;
213 if (data == NULL)
214 goto out;
268 posix_acl_to_xattr(acl, data, len); 215 posix_acl_to_xattr(acl, data, len);
269 error = gfs2_ea_acl_chmod(ip, &el, attr, data); 216 error = gfs2_xattr_acl_chmod(ip, attr, data);
217 kfree(data);
218 set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
270 } 219 }
271 220
272out: 221out:
273 posix_acl_release(acl); 222 posix_acl_release(acl);
274 kfree(data);
275out_brelse:
276 brelse(el.el_bh);
277 return error; 223 return error;
278} 224}
279 225
226static int gfs2_acl_type(const char *name)
227{
228 if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
229 return ACL_TYPE_ACCESS;
230 if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
231 return ACL_TYPE_DEFAULT;
232 return -EINVAL;
233}
234
235static int gfs2_xattr_system_get(struct inode *inode, const char *name,
236 void *buffer, size_t size)
237{
238 struct posix_acl *acl;
239 int type;
240 int error;
241
242 type = gfs2_acl_type(name);
243 if (type < 0)
244 return type;
245
246 acl = gfs2_acl_get(GFS2_I(inode), type);
247 if (IS_ERR(acl))
248 return PTR_ERR(acl);
249 if (acl == NULL)
250 return -ENODATA;
251
252 error = posix_acl_to_xattr(acl, buffer, size);
253 posix_acl_release(acl);
254
255 return error;
256}
257
258static int gfs2_xattr_system_set(struct inode *inode, const char *name,
259 const void *value, size_t size, int flags)
260{
261 struct gfs2_sbd *sdp = GFS2_SB(inode);
262 struct posix_acl *acl = NULL;
263 int error = 0, type;
264
265 if (!sdp->sd_args.ar_posix_acl)
266 return -EOPNOTSUPP;
267
268 type = gfs2_acl_type(name);
269 if (type < 0)
270 return type;
271 if (flags & XATTR_CREATE)
272 return -EINVAL;
273 if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
274 return value ? -EACCES : 0;
275 if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
276 return -EPERM;
277 if (S_ISLNK(inode->i_mode))
278 return -EOPNOTSUPP;
279
280 if (!value)
281 goto set_acl;
282
283 acl = posix_acl_from_xattr(value, size);
284 if (!acl) {
285 /*
286 * acl_set_file(3) may request that we set default ACLs with
287 * zero length -- defend (gracefully) against that here.
288 */
289 goto out;
290 }
291 if (IS_ERR(acl)) {
292 error = PTR_ERR(acl);
293 goto out;
294 }
295
296 error = posix_acl_valid(acl);
297 if (error)
298 goto out_release;
299
300 error = -EINVAL;
301 if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
302 goto out_release;
303
304 if (type == ACL_TYPE_ACCESS) {
305 mode_t mode = inode->i_mode;
306 error = posix_acl_equiv_mode(acl, &mode);
307
308 if (error <= 0) {
309 posix_acl_release(acl);
310 acl = NULL;
311
312 if (error < 0)
313 return error;
314 }
315
316 error = gfs2_set_mode(inode, mode);
317 if (error)
318 goto out_release;
319 }
320
321set_acl:
322 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
323 if (!error) {
324 if (acl)
325 set_cached_acl(inode, type, acl);
326 else
327 forget_cached_acl(inode, type);
328 }
329out_release:
330 posix_acl_release(acl);
331out:
332 return error;
333}
334
335struct xattr_handler gfs2_xattr_system_handler = {
336 .prefix = XATTR_SYSTEM_PREFIX,
337 .get = gfs2_xattr_system_get,
338 .set = gfs2_xattr_system_set,
339};
340
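
The rewritten ACL code drops GFS2's private xattr plumbing in favour of two generic VFS facilities: the per-inode ACL cache (get_cached_acl/set_cached_acl/forget_cached_acl) and a "system." xattr_handler table. The read side is a classic cache-aside lookup; a condensed sketch, with my_read_acl_xattr() as a hypothetical stand-in for the filesystem-specific disk read (gfs2_xattr_acl_get() in this patch):

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/posix_acl.h>
	#include <linux/posix_acl_xattr.h>

	extern int my_read_acl_xattr(struct inode *inode, int type, char **datap);

	static struct posix_acl *my_acl_get(struct inode *inode, int type)
	{
		struct posix_acl *acl;
		char *data;
		int len;

		acl = get_cached_acl(inode, type);	/* fast path: VFS cache */
		if (acl != ACL_NOT_CACHED)
			return acl;

		len = my_read_acl_xattr(inode, type, &data);
		if (len < 0)
			return ERR_PTR(len);		/* I/O or lookup error */
		if (len == 0)
			return NULL;			/* no ACL stored */

		acl = posix_acl_from_xattr(data, len);	/* may be ERR_PTR */
		kfree(data);
		return acl;
	}

Writers pair this with set_cached_acl() after a successful xattr store and forget_cached_acl() on removal; the glops.c hunk further down completes the picture by flushing the cache with forget_all_cached_acls() whenever another node invalidates the inode's metadata.
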
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 6751930bfb64..9306a2e6620c 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -13,26 +13,12 @@
13#include "incore.h" 13#include "incore.h"
14 14
15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access" 15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
16#define GFS2_POSIX_ACL_ACCESS_LEN 16
17#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" 16#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
18#define GFS2_POSIX_ACL_DEFAULT_LEN 17 17#define GFS2_ACL_MAX_ENTRIES 25
19 18
20#define GFS2_ACL_IS_ACCESS(name, len) \ 19extern int gfs2_check_acl(struct inode *inode, int mask);
21 ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \ 20extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
22 !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len))) 21extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
23 22extern struct xattr_handler gfs2_xattr_system_handler;
24#define GFS2_ACL_IS_DEFAULT(name, len) \
25 ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
26 !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
27
28struct gfs2_ea_request;
29
30int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
31 struct gfs2_ea_request *er,
32 int *remove, mode_t *mode);
33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
34int gfs2_check_acl(struct inode *inode, int mask);
35int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
36int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
37 23
38#endif /* __ACL_DOT_H__ */ 24#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 694b5d48f036..7b8da9415267 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1); 270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
272 struct backing_dev_info *bdi = mapping->backing_dev_info;
273 int i; 272 int i;
274 int ret; 273 int ret;
275 274
@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
313 312
314 if (ret || (--(wbc->nr_to_write) <= 0)) 313 if (ret || (--(wbc->nr_to_write) <= 0))
315 ret = 1; 314 ret = 1;
316 if (wbc->nonblocking && bdi_write_congested(bdi)) {
317 wbc->encountered_congestion = 1;
318 ret = 1;
319 }
320
321 } 315 }
322 gfs2_trans_end(sdp); 316 gfs2_trans_end(sdp);
323 return ret; 317 return ret;
@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
338static int gfs2_write_cache_jdata(struct address_space *mapping, 332static int gfs2_write_cache_jdata(struct address_space *mapping,
339 struct writeback_control *wbc) 333 struct writeback_control *wbc)
340{ 334{
341 struct backing_dev_info *bdi = mapping->backing_dev_info;
342 int ret = 0; 335 int ret = 0;
343 int done = 0; 336 int done = 0;
344 struct pagevec pvec; 337 struct pagevec pvec;
@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
348 int scanned = 0; 341 int scanned = 0;
349 int range_whole = 0; 342 int range_whole = 0;
350 343
351 if (wbc->nonblocking && bdi_write_congested(bdi)) {
352 wbc->encountered_congestion = 1;
353 return 0;
354 }
355
356 pagevec_init(&pvec, 0); 344 pagevec_init(&pvec, 0);
357 if (wbc->range_cyclic) { 345 if (wbc->range_cyclic) {
358 index = mapping->writeback_index; /* Start from prev offset */ 346 index = mapping->writeback_index; /* Start from prev offset */
@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
819 mark_inode_dirty(inode); 807 mark_inode_dirty(inode);
820 } 808 }
821 809
822 if (inode == sdp->sd_rindex) 810 if (inode == sdp->sd_rindex) {
823 adjust_fs_space(inode); 811 adjust_fs_space(inode);
812 ip->i_gh.gh_flags |= GL_NOCACHE;
813 }
824 814
825 brelse(dibh); 815 brelse(dibh);
826 gfs2_trans_end(sdp); 816 gfs2_trans_end(sdp);
@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
889 mark_inode_dirty(inode); 879 mark_inode_dirty(inode);
890 } 880 }
891 881
892 if (inode == sdp->sd_rindex) 882 if (inode == sdp->sd_rindex) {
893 adjust_fs_space(inode); 883 adjust_fs_space(inode);
884 ip->i_gh.gh_flags |= GL_NOCACHE;
885 }
894 886
895 brelse(dibh); 887 brelse(dibh);
896 gfs2_trans_end(sdp); 888 gfs2_trans_end(sdp);
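
Besides dropping the now-redundant per-filesystem congestion checks (central writeback handles those), both write_end paths set GL_NOCACHE on the holder once the resource index has grown, so the exclusive glock is released for real rather than cached, and other nodes re-read the new rindex. A sketch of the flag's usual effect, assuming the lock is requested with it up front (function name illustrative):

	static int my_update_rindex(struct gfs2_inode *ip)
	{
		struct gfs2_holder gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &gh);
		if (error)
			return error;
		/* ... modify shared metadata under the exclusive lock ... */
		gfs2_glock_dq_uninit(&gh);	/* drops EX instead of caching it */
		return 0;
	}
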
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 297d7e5cebad..25fddc100f18 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -525,38 +525,6 @@ consist_inode:
525 return ERR_PTR(-EIO); 525 return ERR_PTR(-EIO);
526} 526}
527 527
528
529/**
530 * dirent_first - Return the first dirent
531 * @dip: the directory
532 * @bh: The buffer
533 * @dent: Pointer to list of dirents
534 *
535 * return first dirent whether bh points to leaf or stuffed dinode
536 *
537 * Returns: IS_LEAF, IS_DINODE, or -errno
538 */
539
540static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
541 struct gfs2_dirent **dent)
542{
543 struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
544
545 if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
546 if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
547 return -EIO;
548 *dent = (struct gfs2_dirent *)(bh->b_data +
549 sizeof(struct gfs2_leaf));
550 return IS_LEAF;
551 } else {
552 if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
553 return -EIO;
554 *dent = (struct gfs2_dirent *)(bh->b_data +
555 sizeof(struct gfs2_dinode));
556 return IS_DINODE;
557 }
558}
559
560static int dirent_check_reclen(struct gfs2_inode *dip, 528static int dirent_check_reclen(struct gfs2_inode *dip,
561 const struct gfs2_dirent *d, const void *end_p) 529 const struct gfs2_dirent *d, const void *end_p)
562{ 530{
@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1006 divider = (start + half_len) << (32 - dip->i_depth); 974 divider = (start + half_len) << (32 - dip->i_depth);
1007 975
1008 /* Copy the entries */ 976 /* Copy the entries */
1009 dirent_first(dip, obh, &dent); 977 dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));
1010 978
1011 do { 979 do {
1012 next = dent; 980 next = dent;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8b674b1f3a55..f455a03a09e2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
241 int rv = 0; 241 int rv = 0;
242 242
243 write_lock(gl_lock_addr(gl->gl_hash)); 243 write_lock(gl_lock_addr(gl->gl_hash));
244 if (atomic_dec_and_test(&gl->gl_ref)) { 244 if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
245 hlist_del(&gl->gl_list); 245 hlist_del(&gl->gl_list);
246 write_unlock(gl_lock_addr(gl->gl_hash));
247 spin_lock(&lru_lock);
248 if (!list_empty(&gl->gl_lru)) { 246 if (!list_empty(&gl->gl_lru)) {
249 list_del_init(&gl->gl_lru); 247 list_del_init(&gl->gl_lru);
250 atomic_dec(&lru_count); 248 atomic_dec(&lru_count);
251 } 249 }
252 spin_unlock(&lru_lock); 250 spin_unlock(&lru_lock);
251 write_unlock(gl_lock_addr(gl->gl_hash));
253 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 252 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
254 glock_free(gl); 253 glock_free(gl);
255 rv = 1; 254 rv = 1;
@@ -513,7 +512,6 @@ retry:
513 GLOCK_BUG_ON(gl, 1); 512 GLOCK_BUG_ON(gl, 1);
514 } 513 }
515 spin_unlock(&gl->gl_spin); 514 spin_unlock(&gl->gl_spin);
516 gfs2_glock_put(gl);
517 return; 515 return;
518 } 516 }
519 517
@@ -524,8 +522,6 @@ retry:
524 if (glops->go_xmote_bh) { 522 if (glops->go_xmote_bh) {
525 spin_unlock(&gl->gl_spin); 523 spin_unlock(&gl->gl_spin);
526 rv = glops->go_xmote_bh(gl, gh); 524 rv = glops->go_xmote_bh(gl, gh);
527 if (rv == -EAGAIN)
528 return;
529 spin_lock(&gl->gl_spin); 525 spin_lock(&gl->gl_spin);
530 if (rv) { 526 if (rv) {
531 do_error(gl, rv); 527 do_error(gl, rv);
@@ -540,7 +536,6 @@ out:
540 clear_bit(GLF_LOCK, &gl->gl_flags); 536 clear_bit(GLF_LOCK, &gl->gl_flags);
541out_locked: 537out_locked:
542 spin_unlock(&gl->gl_spin); 538 spin_unlock(&gl->gl_spin);
543 gfs2_glock_put(gl);
544} 539}
545 540
546static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, 541static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
600 595
601 if (!(ret & LM_OUT_ASYNC)) { 596 if (!(ret & LM_OUT_ASYNC)) {
602 finish_xmote(gl, ret); 597 finish_xmote(gl, ret);
603 gfs2_glock_hold(gl);
604 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 598 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
605 gfs2_glock_put(gl); 599 gfs2_glock_put(gl);
606 } else { 600 } else {
@@ -672,12 +666,17 @@ out:
672 return; 666 return;
673 667
674out_sched: 668out_sched:
669 clear_bit(GLF_LOCK, &gl->gl_flags);
670 smp_mb__after_clear_bit();
675 gfs2_glock_hold(gl); 671 gfs2_glock_hold(gl);
676 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 672 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
677 gfs2_glock_put_nolock(gl); 673 gfs2_glock_put_nolock(gl);
674 return;
675
678out_unlock: 676out_unlock:
679 clear_bit(GLF_LOCK, &gl->gl_flags); 677 clear_bit(GLF_LOCK, &gl->gl_flags);
680 goto out; 678 smp_mb__after_clear_bit();
679 return;
681} 680}
682 681
683static void delete_work_func(struct work_struct *work) 682static void delete_work_func(struct work_struct *work)
@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
707{ 706{
708 unsigned long delay = 0; 707 unsigned long delay = 0;
709 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 708 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
709 int drop_ref = 0;
710 710
711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
712 finish_xmote(gl, gl->gl_reply); 712 finish_xmote(gl, gl->gl_reply);
713 drop_ref = 1;
714 }
713 down_read(&gfs2_umount_flush_sem); 715 down_read(&gfs2_umount_flush_sem);
714 spin_lock(&gl->gl_spin); 716 spin_lock(&gl->gl_spin);
715 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 717 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
727 if (!delay || 729 if (!delay ||
728 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 730 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
729 gfs2_glock_put(gl); 731 gfs2_glock_put(gl);
732 if (drop_ref)
733 gfs2_glock_put(gl);
730} 734}
731 735
732/** 736/**
@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1361 list_del_init(&gl->gl_lru); 1365 list_del_init(&gl->gl_lru);
1362 atomic_dec(&lru_count); 1366 atomic_dec(&lru_count);
1363 1367
1364 /* Check if glock is about to be freed */
1365 if (atomic_read(&gl->gl_ref) == 0)
1366 continue;
1367
1368 /* Test for being demotable */ 1368 /* Test for being demotable */
1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1370 gfs2_glock_hold(gl); 1370 gfs2_glock_hold(gl);
@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1375 handle_callback(gl, LM_ST_UNLOCKED, 0); 1375 handle_callback(gl, LM_ST_UNLOCKED, 0);
1376 nr--; 1376 nr--;
1377 } 1377 }
1378 clear_bit(GLF_LOCK, &gl->gl_flags);
1379 smp_mb__after_clear_bit();
1378 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1380 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1379 gfs2_glock_put_nolock(gl); 1381 gfs2_glock_put_nolock(gl);
1380 spin_unlock(&gl->gl_spin); 1382 spin_unlock(&gl->gl_spin);
1381 clear_bit(GLF_LOCK, &gl->gl_flags);
1382 spin_lock(&lru_lock); 1383 spin_lock(&lru_lock);
1383 continue; 1384 continue;
1384 } 1385 }
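
The key change in gfs2_glock_put() is switching from atomic_dec_and_test() plus a separate lru_lock section to atomic_dec_and_lock(): the LRU lock is taken only when the refcount actually hits zero, and list removal then happens atomically with the final put, which is why the shrinker's "about to be freed" recheck above can go. The general idiom, with illustrative names:

	/* atomic_dec_and_lock() returns true, with the lock held, only
	 * when the count reaches zero.  Names are illustrative. */
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <asm/atomic.h>

	static DEFINE_SPINLOCK(my_lru_lock);

	struct my_obj {
		atomic_t		ref;
		struct list_head	lru;
	};

	static void my_obj_put(struct my_obj *obj)
	{
		if (atomic_dec_and_lock(&obj->ref, &my_lru_lock)) {
			/* zero ref: no list walker can resurrect us now */
			if (!list_empty(&obj->lru))
				list_del_init(&obj->lru);
			spin_unlock(&my_lru_lock);
			kfree(obj);
		}
	}
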
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c609894ec0d0..13f0bd228132 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
180 return gl->gl_state == LM_ST_SHARED; 180 return gl->gl_state == LM_ST_SHARED;
181} 181}
182 182
183static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
184{
185 int ret;
186 spin_lock(&gl->gl_spin);
187 ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
188 spin_unlock(&gl->gl_spin);
189 return ret;
190}
191
192int gfs2_glock_get(struct gfs2_sbd *sdp, 183int gfs2_glock_get(struct gfs2_sbd *sdp,
193 u64 number, const struct gfs2_glock_operations *glops, 184 u64 number, const struct gfs2_glock_operations *glops,
194 int create, struct gfs2_glock **glp); 185 int create, struct gfs2_glock **glp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 6985eef06c39..78554acc0605 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
13#include <linux/buffer_head.h> 13#include <linux/buffer_head.h>
14#include <linux/gfs2_ondisk.h> 14#include <linux/gfs2_ondisk.h>
15#include <linux/bio.h> 15#include <linux/bio.h>
16#include <linux/posix_acl.h>
16 17
17#include "gfs2.h" 18#include "gfs2.h"
18#include "incore.h" 19#include "incore.h"
@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
184 if (flags & DIO_METADATA) { 185 if (flags & DIO_METADATA) {
185 struct address_space *mapping = gl->gl_aspace->i_mapping; 186 struct address_space *mapping = gl->gl_aspace->i_mapping;
186 truncate_inode_pages(mapping, 0); 187 truncate_inode_pages(mapping, 0);
187 if (ip) 188 if (ip) {
188 set_bit(GIF_INVALID, &ip->i_flags); 189 set_bit(GIF_INVALID, &ip->i_flags);
190 forget_all_cached_acls(&ip->i_inode);
191 }
189 } 192 }
190 193
191 if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) 194 if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6edb423f90b3..4792200978c8 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -429,7 +429,11 @@ struct gfs2_args {
429 unsigned int ar_meta:1; /* mount metafs */ 429 unsigned int ar_meta:1; /* mount metafs */
430 unsigned int ar_discard:1; /* discard requests */ 430 unsigned int ar_discard:1; /* discard requests */
431 unsigned int ar_errors:2; /* errors=withdraw | panic */ 431 unsigned int ar_errors:2; /* errors=withdraw | panic */
432 unsigned int ar_nobarrier:1; /* do not send barriers */
432 int ar_commit; /* Commit interval */ 433 int ar_commit; /* Commit interval */
434 int ar_statfs_quantum; /* The fast statfs interval */
435 int ar_quota_quantum; /* The quota interval */
436 int ar_statfs_percent; /* The % change to force sync */
433}; 437};
434 438
435struct gfs2_tune { 439struct gfs2_tune {
@@ -558,6 +562,7 @@ struct gfs2_sbd {
558 spinlock_t sd_statfs_spin; 562 spinlock_t sd_statfs_spin;
559 struct gfs2_statfs_change_host sd_statfs_master; 563 struct gfs2_statfs_change_host sd_statfs_master;
560 struct gfs2_statfs_change_host sd_statfs_local; 564 struct gfs2_statfs_change_host sd_statfs_local;
565 int sd_statfs_force_sync;
561 566
562 /* Resource group stuff */ 567 /* Resource group stuff */
563 568
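
The new ar_ fields carry mount-time tunables that previously lived only in gfs2_tune: statfs_quantum/statfs_percent control the fast-statfs sync behaviour, quota_quantum the quota sync interval, and ar_nobarrier disables write barriers. Assuming the option strings mirror the field names (as the ops_fstype.c changes further down suggest), a mount might look like this; device and mountpoint are placeholders:

	#include <sys/mount.h>

	int mount_my_gfs2(void)
	{
		return mount("/dev/vg0/lv_gfs2", "/mnt/gfs2", "gfs2", 0,
			     "statfs_quantum=30,quota_quantum=60,nobarrier");
	}
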
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fb15d3b1f409..26ba2a4c4a2d 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
871 if (error) 871 if (error)
872 goto fail_gunlock2; 872 goto fail_gunlock2;
873 873
874 error = gfs2_acl_create(dip, GFS2_I(inode)); 874 error = gfs2_acl_create(dip, inode);
875 if (error) 875 if (error)
876 goto fail_gunlock2; 876 goto fail_gunlock2;
877 877
@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
947 947
948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); 949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
950 str->di_header.__pad0 = 0;
951 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); 950 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
952 str->di_header.__pad1 = 0;
953 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); 951 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
954 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); 952 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
955 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); 953 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 13c6237c5f67..4511b08fc451 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
596 memset(lh, 0, sizeof(struct gfs2_log_header)); 596 memset(lh, 0, sizeof(struct gfs2_log_header));
597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
599 lh->lh_header.__pad0 = cpu_to_be64(0);
599 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 600 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
601 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
600 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); 602 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
601 lh->lh_flags = cpu_to_be32(flags); 603 lh->lh_flags = cpu_to_be32(flags);
602 lh->lh_tail = cpu_to_be32(tail); 604 lh->lh_tail = cpu_to_be32(tail);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 9969ff062c5b..de97632ba32f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) 132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
133{ 133{
134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); 134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
135 struct gfs2_meta_header *mh;
135 struct gfs2_trans *tr; 136 struct gfs2_trans *tr;
136 137
137 lock_buffer(bd->bd_bh); 138 lock_buffer(bd->bd_bh);
@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
148 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); 149 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
149 gfs2_meta_check(sdp, bd->bd_bh); 150 gfs2_meta_check(sdp, bd->bd_bh);
150 gfs2_pin(sdp, bd->bd_bh); 151 gfs2_pin(sdp, bd->bd_bh);
152 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
153 mh->__pad0 = cpu_to_be64(0);
154 mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
151 sdp->sd_log_num_buf++; 155 sdp->sd_log_num_buf++;
152 list_add(&le->le_list, &sdp->sd_log_le_buf); 156 list_add(&le->le_list, &sdp->sd_log_le_buf);
153 tr->tr_num_buf_new++; 157 tr->tr_num_buf_new++;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index eacd78a5d082..5b31f7741a8f 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
114 if (error) 114 if (error)
115 goto fail_unregister; 115 goto fail_unregister;
116 116
117 error = slow_work_register_user(); 117 error = slow_work_register_user(THIS_MODULE);
118 if (error) 118 if (error)
119 goto fail_slow; 119 goto fail_slow;
120 120
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
163 gfs2_unregister_debugfs(); 163 gfs2_unregister_debugfs();
164 unregister_filesystem(&gfs2_fs_type); 164 unregister_filesystem(&gfs2_fs_type);
165 unregister_filesystem(&gfs2meta_fs_type); 165 unregister_filesystem(&gfs2meta_fs_type);
166 slow_work_unregister_user(); 166 slow_work_unregister_user(THIS_MODULE);
167 167
168 kmem_cache_destroy(gfs2_quotad_cachep); 168 kmem_cache_destroy(gfs2_quotad_cachep);
169 kmem_cache_destroy(gfs2_rgrpd_cachep); 169 kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 52fb6c048981..edfee24f3636 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -18,6 +18,7 @@
18#include <linux/mount.h> 18#include <linux/mount.h>
19#include <linux/gfs2_ondisk.h> 19#include <linux/gfs2_ondisk.h>
20#include <linux/slow-work.h> 20#include <linux/slow-work.h>
21#include <linux/quotaops.h>
21 22
22#include "gfs2.h" 23#include "gfs2.h"
23#include "incore.h" 24#include "incore.h"
@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
62 gt->gt_quota_warn_period = 10; 63 gt->gt_quota_warn_period = 10;
63 gt->gt_quota_scale_num = 1; 64 gt->gt_quota_scale_num = 1;
64 gt->gt_quota_scale_den = 1; 65 gt->gt_quota_scale_den = 1;
65 gt->gt_quota_quantum = 60;
66 gt->gt_new_files_jdata = 0; 66 gt->gt_new_files_jdata = 0;
67 gt->gt_max_readahead = 1 << 18; 67 gt->gt_max_readahead = 1 << 18;
68 gt->gt_stall_secs = 600; 68 gt->gt_stall_secs = 600;
69 gt->gt_complain_secs = 10; 69 gt->gt_complain_secs = 10;
70 gt->gt_statfs_quantum = 30;
71 gt->gt_statfs_slow = 0;
72} 70}
73 71
74static struct gfs2_sbd *init_sbd(struct super_block *sb) 72static struct gfs2_sbd *init_sbd(struct super_block *sb)
@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
1114 * Returns: errno 1112 * Returns: errno
1115 */ 1113 */
1116 1114
1117static int fill_super(struct super_block *sb, void *data, int silent) 1115static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1118{ 1116{
1119 struct gfs2_sbd *sdp; 1117 struct gfs2_sbd *sdp;
1120 struct gfs2_holder mount_gh; 1118 struct gfs2_holder mount_gh;
@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1125 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); 1123 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
1126 return -ENOMEM; 1124 return -ENOMEM;
1127 } 1125 }
1128 1126 sdp->sd_args = *args;
1129 sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
1130 sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
1131 sdp->sd_args.ar_commit = 60;
1132 sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT;
1133
1134 error = gfs2_mount_args(sdp, &sdp->sd_args, data);
1135 if (error) {
1136 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1137 goto fail;
1138 }
1139 1127
1140 if (sdp->sd_args.ar_spectator) { 1128 if (sdp->sd_args.ar_spectator) {
1141 sb->s_flags |= MS_RDONLY; 1129 sb->s_flags |= MS_RDONLY;
@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1143 } 1131 }
1144 if (sdp->sd_args.ar_posix_acl) 1132 if (sdp->sd_args.ar_posix_acl)
1145 sb->s_flags |= MS_POSIXACL; 1133 sb->s_flags |= MS_POSIXACL;
1134 if (sdp->sd_args.ar_nobarrier)
1135 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1146 1136
1147 sb->s_magic = GFS2_MAGIC; 1137 sb->s_magic = GFS2_MAGIC;
1148 sb->s_op = &gfs2_super_ops; 1138 sb->s_op = &gfs2_super_ops;
1149 sb->s_export_op = &gfs2_export_ops; 1139 sb->s_export_op = &gfs2_export_ops;
1150 sb->s_xattr = gfs2_xattr_handlers; 1140 sb->s_xattr = gfs2_xattr_handlers;
1141 sb->s_qcop = &gfs2_quotactl_ops;
1142 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1151 sb->s_time_gran = 1; 1143 sb->s_time_gran = 1;
1152 sb->s_maxbytes = MAX_LFS_FILESIZE; 1144 sb->s_maxbytes = MAX_LFS_FILESIZE;
1153 1145
@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1160 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; 1152 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
1161 1153
1162 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; 1154 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
1155 sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1156 if (sdp->sd_args.ar_statfs_quantum) {
1157 sdp->sd_tune.gt_statfs_slow = 0;
1158 sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1159 }
1160 else {
1161 sdp->sd_tune.gt_statfs_slow = 1;
1162 sdp->sd_tune.gt_statfs_quantum = 30;
1163 }
1163 1164
1164 error = init_names(sdp, silent); 1165 error = init_names(sdp, silent);
1165 if (error) 1166 if (error)
@@ -1243,18 +1244,127 @@ fail:
1243 return error; 1244 return error;
1244} 1245}
1245 1246
1246static int gfs2_get_sb(struct file_system_type *fs_type, int flags, 1247static int set_gfs2_super(struct super_block *s, void *data)
1247 const char *dev_name, void *data, struct vfsmount *mnt)
1248{ 1248{
1249 return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt); 1249 s->s_bdev = data;
1250 s->s_dev = s->s_bdev->bd_dev;
1251
1252 /*
1253 * We set the bdi here to the queue backing, file systems can
1254 * overwrite this in ->fill_super()
1255 */
1256 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
1257 return 0;
1250} 1258}
1251 1259
1252static int test_meta_super(struct super_block *s, void *ptr) 1260static int test_gfs2_super(struct super_block *s, void *ptr)
1253{ 1261{
1254 struct block_device *bdev = ptr; 1262 struct block_device *bdev = ptr;
1255 return (bdev == s->s_bdev); 1263 return (bdev == s->s_bdev);
1256} 1264}
1257 1265
1266/**
1267 * gfs2_get_sb - Get the GFS2 superblock
1268 * @fs_type: The GFS2 filesystem type
1269 * @flags: Mount flags
1270 * @dev_name: The name of the device
1271 * @data: The mount arguments
1272 * @mnt: The vfsmnt for this mount
1273 *
1274 * Q. Why not use get_sb_bdev() ?
1275 * A. We need to select one of two root directories to mount, independent
1276 * of whether this is the initial, or subsequent, mount of this sb
1277 *
1278 * Returns: 0 or -ve on error
1279 */
1280
1281static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
1282 const char *dev_name, void *data, struct vfsmount *mnt)
1283{
1284 struct block_device *bdev;
1285 struct super_block *s;
1286 fmode_t mode = FMODE_READ;
1287 int error;
1288 struct gfs2_args args;
1289 struct gfs2_sbd *sdp;
1290
1291 if (!(flags & MS_RDONLY))
1292 mode |= FMODE_WRITE;
1293
1294 bdev = open_bdev_exclusive(dev_name, mode, fs_type);
1295 if (IS_ERR(bdev))
1296 return PTR_ERR(bdev);
1297
1298 /*
1299 * once the super is inserted into the list by sget, s_umount
1300 * will protect the lockfs code from trying to start a snapshot
1301 * while we are mounting
1302 */
1303 mutex_lock(&bdev->bd_fsfreeze_mutex);
1304 if (bdev->bd_fsfreeze_count > 0) {
1305 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1306 error = -EBUSY;
1307 goto error_bdev;
1308 }
1309 s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
1310 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1311 error = PTR_ERR(s);
1312 if (IS_ERR(s))
1313 goto error_bdev;
1314
1315 memset(&args, 0, sizeof(args));
1316 args.ar_quota = GFS2_QUOTA_DEFAULT;
1317 args.ar_data = GFS2_DATA_DEFAULT;
1318 args.ar_commit = 60;
1319 args.ar_statfs_quantum = 30;
1320 args.ar_quota_quantum = 60;
1321 args.ar_errors = GFS2_ERRORS_DEFAULT;
1322
1323 error = gfs2_mount_args(&args, data);
1324 if (error) {
1325 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1326 if (s->s_root)
1327 goto error_super;
1328 deactivate_locked_super(s);
1329 return error;
1330 }
1331
1332 if (s->s_root) {
1333 error = -EBUSY;
1334 if ((flags ^ s->s_flags) & MS_RDONLY)
1335 goto error_super;
1336 close_bdev_exclusive(bdev, mode);
1337 } else {
1338 char b[BDEVNAME_SIZE];
1339
1340 s->s_flags = flags;
1341 s->s_mode = mode;
1342 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
1343 sb_set_blocksize(s, block_size(bdev));
1344 error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
1345 if (error) {
1346 deactivate_locked_super(s);
1347 return error;
1348 }
1349 s->s_flags |= MS_ACTIVE;
1350 bdev->bd_super = s;
1351 }
1352
1353 sdp = s->s_fs_info;
1354 mnt->mnt_sb = s;
1355 if (args.ar_meta)
1356 mnt->mnt_root = dget(sdp->sd_master_dir);
1357 else
1358 mnt->mnt_root = dget(sdp->sd_root_dir);
1359 return 0;
1360
1361error_super:
1362 deactivate_locked_super(s);
1363error_bdev:
1364 close_bdev_exclusive(bdev, mode);
1365 return error;
1366}
1367
1258static int set_meta_super(struct super_block *s, void *ptr) 1368static int set_meta_super(struct super_block *s, void *ptr)
1259{ 1369{
1260 return -EINVAL; 1370 return -EINVAL;
@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
1274 dev_name, error); 1384 dev_name, error);
1275 return error; 1385 return error;
1276 } 1386 }
1277 s = sget(&gfs2_fs_type, test_meta_super, set_meta_super, 1387 s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
1278 path.dentry->d_inode->i_sb->s_bdev); 1388 path.dentry->d_inode->i_sb->s_bdev);
1279 path_put(&path); 1389 path_put(&path);
1280 if (IS_ERR(s)) { 1390 if (IS_ERR(s)) {
1281 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); 1391 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
1282 return PTR_ERR(s); 1392 return PTR_ERR(s);
1283 } 1393 }
1394 if ((flags ^ s->s_flags) & MS_RDONLY) {
1395 deactivate_locked_super(s);
1396 return -EBUSY;
1397 }
1284 sdp = s->s_fs_info; 1398 sdp = s->s_fs_info;
1285 mnt->mnt_sb = s; 1399 mnt->mnt_sb = s;
1286 mnt->mnt_root = dget(sdp->sd_master_dir); 1400 mnt->mnt_root = dget(sdp->sd_master_dir);
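
gfs2_get_sb() now open-codes what get_sb_bdev() would do because, after sget(), it must choose between two roots (sd_master_dir for meta mounts, sd_root_dir otherwise) whether the superblock is new or already mounted. The sget() contract it relies on, in skeleton form (my_* names are illustrative):

	/* sget() find-or-create skeleton.  test() matches an existing sb
	 * by backing device; set() initialises a newly allocated one.
	 * Either way sget() returns the sb locked and referenced. */
	static int my_test_super(struct super_block *s, void *data)
	{
		return s->s_bdev == data;
	}

	static int my_set_super(struct super_block *s, void *data)
	{
		s->s_bdev = data;
		s->s_dev = s->s_bdev->bd_dev;
		return 0;
	}

	static struct super_block *my_find_or_create(struct file_system_type *t,
						     struct block_device *bdev)
	{
		return sget(t, my_test_super, my_set_super, bdev);
	}

A returned sb with s_root already set is an existing mount, so fill_super() is skipped and only a root dentry is grabbed; otherwise the caller fills it in and marks it MS_ACTIVE, exactly as the hunk above does.
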
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2e9b9326bfc9..e3bf6eab8750 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -15,7 +15,7 @@
15 * fuzziness in the current usage value of IDs that are being used on different 15 * fuzziness in the current usage value of IDs that are being used on different
16 * nodes in the cluster simultaneously. So, it is possible for a user on 16 * nodes in the cluster simultaneously. So, it is possible for a user on
 17 * multiple nodes to overrun their quota, but that overrun is controllable. 17 * multiple nodes to overrun their quota, but that overrun is controllable.
18 * Since quota tags are part of transactions, there is no need to a quota check 18 * Since quota tags are part of transactions, there is no need for a quota check
19 * program to be run on node crashes or anything like that. 19 * program to be run on node crashes or anything like that.
20 * 20 *
21 * There are couple of knobs that let the administrator manage the quota 21 * There are couple of knobs that let the administrator manage the quota
@@ -47,6 +47,8 @@
47#include <linux/gfs2_ondisk.h> 47#include <linux/gfs2_ondisk.h>
48#include <linux/kthread.h> 48#include <linux/kthread.h>
49#include <linux/freezer.h> 49#include <linux/freezer.h>
50#include <linux/quota.h>
51#include <linux/dqblk_xfs.h>
50 52
51#include "gfs2.h" 53#include "gfs2.h"
52#include "incore.h" 54#include "incore.h"
@@ -65,13 +67,6 @@
65#define QUOTA_USER 1 67#define QUOTA_USER 1
66#define QUOTA_GROUP 0 68#define QUOTA_GROUP 0
67 69
68struct gfs2_quota_host {
69 u64 qu_limit;
70 u64 qu_warn;
71 s64 qu_value;
72 u32 qu_ll_next;
73};
74
75struct gfs2_quota_change_host { 70struct gfs2_quota_change_host {
76 u64 qc_change; 71 u64 qc_change;
77 u32 qc_flags; /* GFS2_QCF_... */ 72 u32 qc_flags; /* GFS2_QCF_... */
@@ -164,7 +159,7 @@ fail:
164 return error; 159 return error;
165} 160}
166 161
167static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 162static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
168 struct gfs2_quota_data **qdp) 163 struct gfs2_quota_data **qdp)
169{ 164{
170 struct gfs2_quota_data *qd = NULL, *new_qd = NULL; 165 struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
202 197
203 spin_unlock(&qd_lru_lock); 198 spin_unlock(&qd_lru_lock);
204 199
205 if (qd || !create) { 200 if (qd) {
206 if (new_qd) { 201 if (new_qd) {
207 gfs2_glock_put(new_qd->qd_gl); 202 gfs2_glock_put(new_qd->qd_gl);
208 kmem_cache_free(gfs2_quotad_cachep, new_qd); 203 kmem_cache_free(gfs2_quotad_cachep, new_qd);
@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
461 qd_put(qd); 456 qd_put(qd);
462} 457}
463 458
464static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 459static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
465 struct gfs2_quota_data **qdp) 460 struct gfs2_quota_data **qdp)
466{ 461{
467 int error; 462 int error;
468 463
469 error = qd_get(sdp, user, id, create, qdp); 464 error = qd_get(sdp, user, id, qdp);
470 if (error) 465 if (error)
471 return error; 466 return error;
472 467
@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
508 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 503 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
509 return 0; 504 return 0;
510 505
511 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd); 506 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
512 if (error) 507 if (error)
513 goto out; 508 goto out;
514 al->al_qd_num++; 509 al->al_qd_num++;
515 qd++; 510 qd++;
516 511
517 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd); 512 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
518 if (error) 513 if (error)
519 goto out; 514 goto out;
520 al->al_qd_num++; 515 al->al_qd_num++;
521 qd++; 516 qd++;
522 517
523 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { 518 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
524 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); 519 error = qdsb_get(sdp, QUOTA_USER, uid, qd);
525 if (error) 520 if (error)
526 goto out; 521 goto out;
527 al->al_qd_num++; 522 al->al_qd_num++;
@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
529 } 524 }
530 525
531 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { 526 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
532 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); 527 error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
533 if (error) 528 if (error)
534 goto out; 529 goto out;
535 al->al_qd_num++; 530 al->al_qd_num++;
@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
617 mutex_unlock(&sdp->sd_quota_mutex); 612 mutex_unlock(&sdp->sd_quota_mutex);
618} 613}
619 614
620static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
621{
622 const struct gfs2_quota *str = buf;
623
624 qu->qu_limit = be64_to_cpu(str->qu_limit);
625 qu->qu_warn = be64_to_cpu(str->qu_warn);
626 qu->qu_value = be64_to_cpu(str->qu_value);
627 qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
628}
629
630static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
631{
632 struct gfs2_quota *str = buf;
633
634 str->qu_limit = cpu_to_be64(qu->qu_limit);
635 str->qu_warn = cpu_to_be64(qu->qu_warn);
636 str->qu_value = cpu_to_be64(qu->qu_value);
637 str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
638 memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
639}
640
641/** 615/**
642 * gfs2_adjust_quota 616 * gfs2_adjust_quota - adjust record of current block usage
617 * @ip: The quota inode
618 * @loc: Offset of the entry in the quota file
619 * @change: The amount of usage change to record
620 * @qd: The quota data
621 * @fdq: The updated limits to record
643 * 622 *
644 * This function was mostly borrowed from gfs2_block_truncate_page which was 623 * This function was mostly borrowed from gfs2_block_truncate_page which was
645 * in turn mostly borrowed from ext3 624 * in turn mostly borrowed from ext3
625 *
626 * Returns: 0 or -ve on error
646 */ 627 */
628
647static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 629static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
648 s64 change, struct gfs2_quota_data *qd) 630 s64 change, struct gfs2_quota_data *qd,
631 struct fs_disk_quota *fdq)
649{ 632{
650 struct inode *inode = &ip->i_inode; 633 struct inode *inode = &ip->i_inode;
651 struct address_space *mapping = inode->i_mapping; 634 struct address_space *mapping = inode->i_mapping;
652 unsigned long index = loc >> PAGE_CACHE_SHIFT; 635 unsigned long index = loc >> PAGE_CACHE_SHIFT;
653 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 636 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
654 unsigned blocksize, iblock, pos; 637 unsigned blocksize, iblock, pos;
655 struct buffer_head *bh; 638 struct buffer_head *bh, *dibh;
656 struct page *page; 639 struct page *page;
657 void *kaddr; 640 void *kaddr;
658 char *ptr; 641 struct gfs2_quota *qp;
659 struct gfs2_quota_host qp;
660 s64 value; 642 s64 value;
661 int err = -EIO; 643 int err = -EIO;
644 u64 size;
662 645
663 if (gfs2_is_stuffed(ip)) 646 if (gfs2_is_stuffed(ip))
664 gfs2_unstuff_dinode(ip, NULL); 647 gfs2_unstuff_dinode(ip, NULL);
@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
700 gfs2_trans_add_bh(ip->i_gl, bh, 0); 683 gfs2_trans_add_bh(ip->i_gl, bh, 0);
701 684
702 kaddr = kmap_atomic(page, KM_USER0); 685 kaddr = kmap_atomic(page, KM_USER0);
703 ptr = kaddr + offset; 686 qp = kaddr + offset;
704 gfs2_quota_in(&qp, ptr); 687 value = (s64)be64_to_cpu(qp->qu_value) + change;
705 qp.qu_value += change; 688 qp->qu_value = cpu_to_be64(value);
706 value = qp.qu_value; 689 qd->qd_qb.qb_value = qp->qu_value;
707 gfs2_quota_out(&qp, ptr); 690 if (fdq) {
691 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
692 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
693 qd->qd_qb.qb_warn = qp->qu_warn;
694 }
695 if (fdq->d_fieldmask & FS_DQ_BHARD) {
696 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
697 qd->qd_qb.qb_limit = qp->qu_limit;
698 }
699 }
708 flush_dcache_page(page); 700 flush_dcache_page(page);
709 kunmap_atomic(kaddr, KM_USER0); 701 kunmap_atomic(kaddr, KM_USER0);
710 err = 0; 702
711 qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC); 703 err = gfs2_meta_inode_buffer(ip, &dibh);
712 qd->qd_qb.qb_value = cpu_to_be64(value); 704 if (err)
713 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC); 705 goto unlock;
714 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value); 706
707 size = loc + sizeof(struct gfs2_quota);
708 if (size > inode->i_size) {
709 ip->i_disksize = size;
710 i_size_write(inode, size);
711 }
712 inode->i_mtime = inode->i_atime = CURRENT_TIME;
713 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
714 gfs2_dinode_out(ip, dibh->b_data);
715 brelse(dibh);
716 mark_inode_dirty(inode);
717
715unlock: 718unlock:
716 unlock_page(page); 719 unlock_page(page);
717 page_cache_release(page); 720 page_cache_release(page);
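
gfs2_adjust_quota() now optionally takes a struct fs_disk_quota so the XFS-style quotactl path can update limits in the same write: only the limits whose bits are set in d_fieldmask are touched. A sketch of how a caller might prepare such an update (helper name and values are illustrative):

	/* Fieldmask convention from linux/dqblk_xfs.h: mark which limits
	 * you are setting; everything else is left alone. */
	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/dqblk_xfs.h>

	static void my_fill_fdq(struct fs_disk_quota *fdq, u64 soft, u64 hard)
	{
		memset(fdq, 0, sizeof(*fdq));
		fdq->d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
		fdq->d_blk_softlimit = soft;	/* written: bit set above */
		fdq->d_blk_hardlimit = hard;	/* written: bit set above */
		/* inode limits untouched: FS_DQ_ISOFT/FS_DQ_IHARD bits clear */
	}
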
@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
739 return -ENOMEM; 742 return -ENOMEM;
740 743
741 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); 744 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
745 mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
742 for (qx = 0; qx < num_qd; qx++) { 746 for (qx = 0; qx < num_qd; qx++) {
743 error = gfs2_glock_nq_init(qda[qx]->qd_gl, 747 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
744 LM_ST_EXCLUSIVE,
745 GL_NOCACHE, &ghs[qx]); 748 GL_NOCACHE, &ghs[qx]);
746 if (error) 749 if (error)
747 goto out; 750 goto out;
@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
795 for (x = 0; x < num_qd; x++) { 798 for (x = 0; x < num_qd; x++) {
796 qd = qda[x]; 799 qd = qda[x];
797 offset = qd2offset(qd); 800 offset = qd2offset(qd);
798 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, 801 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
799 (struct gfs2_quota_data *)
800 qd);
801 if (error) 802 if (error)
802 goto out_end_trans; 803 goto out_end_trans;
803 804
@@ -817,21 +818,44 @@ out_gunlock:
817out: 818out:
818 while (qx--) 819 while (qx--)
819 gfs2_glock_dq_uninit(&ghs[qx]); 820 gfs2_glock_dq_uninit(&ghs[qx]);
821 mutex_unlock(&ip->i_inode.i_mutex);
820 kfree(ghs); 822 kfree(ghs);
821 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); 823 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
822 return error; 824 return error;
823} 825}
824 826
827static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
828{
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_quota q;
831 struct gfs2_quota_lvb *qlvb;
832 loff_t pos;
833 int error;
834
835 memset(&q, 0, sizeof(struct gfs2_quota));
836 pos = qd2offset(qd);
837 error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
838 if (error < 0)
839 return error;
840
841 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
843 qlvb->__pad = 0;
844 qlvb->qb_limit = q.qu_limit;
845 qlvb->qb_warn = q.qu_warn;
846 qlvb->qb_value = q.qu_value;
847 qd->qd_qb = *qlvb;
848
849 return 0;
850}
851
825static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 852static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
826 struct gfs2_holder *q_gh) 853 struct gfs2_holder *q_gh)
827{ 854{
828 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 855 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 856 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_holder i_gh; 857 struct gfs2_holder i_gh;
831 struct gfs2_quota_host q;
832 char buf[sizeof(struct gfs2_quota)];
833 int error; 858 int error;
834 struct gfs2_quota_lvb *qlvb;
835 859
836restart: 860restart:
837 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); 861 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
@@ -841,11 +865,9 @@ restart:
841 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 865 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 866
843 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { 867 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
844 loff_t pos;
845 gfs2_glock_dq_uninit(q_gh); 868 gfs2_glock_dq_uninit(q_gh);
846 error = gfs2_glock_nq_init(qd->qd_gl, 869 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
847 LM_ST_EXCLUSIVE, GL_NOCACHE, 870 GL_NOCACHE, q_gh);
848 q_gh);
849 if (error) 871 if (error)
850 return error; 872 return error;
851 873
@@ -853,29 +875,14 @@ restart:
853 if (error) 875 if (error)
854 goto fail; 876 goto fail;
855 877
856 memset(buf, 0, sizeof(struct gfs2_quota)); 878 error = update_qd(sdp, qd);
857 pos = qd2offset(qd); 879 if (error)
858 error = gfs2_internal_read(ip, NULL, buf, &pos,
859 sizeof(struct gfs2_quota));
860 if (error < 0)
861 goto fail_gunlock; 880 goto fail_gunlock;
862 881
863 gfs2_glock_dq_uninit(&i_gh); 882 gfs2_glock_dq_uninit(&i_gh);
864 883 gfs2_glock_dq_uninit(q_gh);
865 gfs2_quota_in(&q, buf); 884 force_refresh = 0;
866 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 885 goto restart;
867 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
868 qlvb->__pad = 0;
869 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
870 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
871 qlvb->qb_value = cpu_to_be64(q.qu_value);
872 qd->qd_qb = *qlvb;
873
874 if (gfs2_glock_is_blocking(qd->qd_gl)) {
875 gfs2_glock_dq_uninit(q_gh);
876 force_refresh = 0;
877 goto restart;
878 }
879 } 886 }
880 887
881 return 0; 888 return 0;
@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
995{ 1002{
996 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1003 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
997 1004
998 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n", 1005 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
999 sdp->sd_fsname, type, 1006 sdp->sd_fsname, type,
1000 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", 1007 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
1001 qd->qd_id); 1008 qd->qd_id);
@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1032 1039
1033 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { 1040 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1034 print_message(qd, "exceeded"); 1041 print_message(qd, "exceeded");
1042 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1043 USRQUOTA : GRPQUOTA, qd->qd_id,
1044 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1045
1035 error = -EDQUOT; 1046 error = -EDQUOT;
1036 break; 1047 break;
1037 } else if (be64_to_cpu(qd->qd_qb.qb_warn) && 1048 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1039 time_after_eq(jiffies, qd->qd_last_warn + 1050 time_after_eq(jiffies, qd->qd_last_warn +
1040 gfs2_tune_get(sdp, 1051 gfs2_tune_get(sdp,
1041 gt_quota_warn_period) * HZ)) { 1052 gt_quota_warn_period) * HZ)) {
1053 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1054 USRQUOTA : GRPQUOTA, qd->qd_id,
1055 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1042 error = print_message(qd, "warning"); 1056 error = print_message(qd, "warning");
1043 qd->qd_last_warn = jiffies; 1057 qd->qd_last_warn = jiffies;
1044 } 1058 }
@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1069 } 1083 }
1070} 1084}
1071 1085
1072int gfs2_quota_sync(struct gfs2_sbd *sdp) 1086int gfs2_quota_sync(struct super_block *sb, int type)
1073{ 1087{
1088 struct gfs2_sbd *sdp = sb->s_fs_info;
1074 struct gfs2_quota_data **qda; 1089 struct gfs2_quota_data **qda;
1075 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync); 1090 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1076 unsigned int num_qd; 1091 unsigned int num_qd;
@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1118 struct gfs2_holder q_gh; 1133 struct gfs2_holder q_gh;
1119 int error; 1134 int error;
1120 1135
1121 error = qd_get(sdp, user, id, CREATE, &qd); 1136 error = qd_get(sdp, user, id, &qd);
1122 if (error) 1137 if (error)
1123 return error; 1138 return error;
1124 1139
@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1127 gfs2_glock_dq_uninit(&q_gh); 1142 gfs2_glock_dq_uninit(&q_gh);
1128 1143
1129 qd_put(qd); 1144 qd_put(qd);
1130
1131 return error; 1145 return error;
1132} 1146}
1133 1147
@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1298} 1312}
1299 1313
1300static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, 1314static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1301 int (*fxn)(struct gfs2_sbd *sdp), 1315 int (*fxn)(struct super_block *sb, int type),
1302 unsigned long t, unsigned long *timeo, 1316 unsigned long t, unsigned long *timeo,
1303 unsigned int *new_timeo) 1317 unsigned int *new_timeo)
1304{ 1318{
1305 if (t >= *timeo) { 1319 if (t >= *timeo) {
1306 int error = fxn(sdp); 1320 int error = fxn(sdp->sd_vfs, 0);
1307 quotad_error(sdp, msg, error); 1321 quotad_error(sdp, msg, error);
1308 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; 1322 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1309 } else { 1323 } else {
@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1330 } 1344 }
1331} 1345}
1332 1346
1347void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1348 if (!sdp->sd_statfs_force_sync) {
1349 sdp->sd_statfs_force_sync = 1;
1350 wake_up(&sdp->sd_quota_wait);
1351 }
1352}
1353
1354
1333/** 1355/**
1334 * gfs2_quotad - Write cached quota changes into the quota file 1356 * gfs2_quotad - Write cached quota changes into the quota file
1335 * @sdp: Pointer to GFS2 superblock 1357 * @sdp: Pointer to GFS2 superblock
@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data)
1349 while (!kthread_should_stop()) { 1371 while (!kthread_should_stop()) {
1350 1372
1351 /* Update the master statfs file */ 1373 /* Update the master statfs file */
1352 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, 1374 if (sdp->sd_statfs_force_sync) {
1353 &statfs_timeo, &tune->gt_statfs_quantum); 1375 int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1376 quotad_error(sdp, "statfs", error);
1377 statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1378 }
1379 else
1380 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1381 &statfs_timeo,
1382 &tune->gt_statfs_quantum);
1354 1383
1355 /* Update quota file */ 1384 /* Update quota file */
1356 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, 1385 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data)
1367 spin_lock(&sdp->sd_trunc_lock); 1396 spin_lock(&sdp->sd_trunc_lock);
1368 empty = list_empty(&sdp->sd_trunc_list); 1397 empty = list_empty(&sdp->sd_trunc_list);
1369 spin_unlock(&sdp->sd_trunc_lock); 1398 spin_unlock(&sdp->sd_trunc_lock);
1370 if (empty) 1399 if (empty && !sdp->sd_statfs_force_sync)
1371 t -= schedule_timeout(t); 1400 t -= schedule_timeout(t);
1372 else 1401 else
1373 t = 0; 1402 t = 0;
@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data)
1377 return 0; 1406 return 0;
1378} 1407}
1379 1408
1409static int gfs2_quota_get_xstate(struct super_block *sb,
1410 struct fs_quota_stat *fqs)
1411{
1412 struct gfs2_sbd *sdp = sb->s_fs_info;
1413
1414 memset(fqs, 0, sizeof(struct fs_quota_stat));
1415 fqs->qs_version = FS_QSTAT_VERSION;
1416 if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
1417 fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
1418 else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
1419 fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
1420 if (sdp->sd_quota_inode) {
1421 fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1422 fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1423 }
1424 fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
1425	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1426 fqs->qs_incoredqs = atomic_read(&qd_lru_count);
1427 return 0;
1428}
1429
1430static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
1431 struct fs_disk_quota *fdq)
1432{
1433 struct gfs2_sbd *sdp = sb->s_fs_info;
1434 struct gfs2_quota_lvb *qlvb;
1435 struct gfs2_quota_data *qd;
1436 struct gfs2_holder q_gh;
1437 int error;
1438
1439 memset(fdq, 0, sizeof(struct fs_disk_quota));
1440
1441 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1442 return -ESRCH; /* Crazy XFS error code */
1443
1444 if (type == USRQUOTA)
1445 type = QUOTA_USER;
1446 else if (type == GRPQUOTA)
1447 type = QUOTA_GROUP;
1448 else
1449 return -EINVAL;
1450
1451 error = qd_get(sdp, type, id, &qd);
1452 if (error)
1453 return error;
1454 error = do_glock(qd, FORCE, &q_gh);
1455 if (error)
1456 goto out;
1457
1458 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
1459 fdq->d_version = FS_DQUOT_VERSION;
1460 fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
1461 fdq->d_id = id;
1462 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
1463 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
1464 fdq->d_bcount = be64_to_cpu(qlvb->qb_value);
1465
1466 gfs2_glock_dq_uninit(&q_gh);
1467out:
1468 qd_put(qd);
1469 return error;
1470}
1471
1472/* GFS2 only supports a subset of the XFS fields */
1473#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
1474
1475static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
1476 struct fs_disk_quota *fdq)
1477{
1478 struct gfs2_sbd *sdp = sb->s_fs_info;
1479 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1480 struct gfs2_quota_data *qd;
1481 struct gfs2_holder q_gh, i_gh;
1482 unsigned int data_blocks, ind_blocks;
1483 unsigned int blocks = 0;
1484 int alloc_required;
1485 struct gfs2_alloc *al;
1486 loff_t offset;
1487 int error;
1488
1489 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1490 return -ESRCH; /* Crazy XFS error code */
1491
1492 switch(type) {
1493 case USRQUOTA:
1494 type = QUOTA_USER;
1495 if (fdq->d_flags != XFS_USER_QUOTA)
1496 return -EINVAL;
1497 break;
1498 case GRPQUOTA:
1499 type = QUOTA_GROUP;
1500 if (fdq->d_flags != XFS_GROUP_QUOTA)
1501 return -EINVAL;
1502 break;
1503 default:
1504 return -EINVAL;
1505 }
1506
1507 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1508 return -EINVAL;
1509 if (fdq->d_id != id)
1510 return -EINVAL;
1511
1512 error = qd_get(sdp, type, id, &qd);
1513 if (error)
1514 return error;
1515
1516 mutex_lock(&ip->i_inode.i_mutex);
1517 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1518 if (error)
1519 goto out_put;
1520 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1521 if (error)
1522 goto out_q;
1523
1524 /* Check for existing entry, if none then alloc new blocks */
1525 error = update_qd(sdp, qd);
1526 if (error)
1527 goto out_i;
1528
1529 /* If nothing has changed, this is a no-op */
1530 if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1531 (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
1532 fdq->d_fieldmask ^= FS_DQ_BSOFT;
1533 if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1534 (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
1535 fdq->d_fieldmask ^= FS_DQ_BHARD;
1536 if (fdq->d_fieldmask == 0)
1537 goto out_i;
1538
1539 offset = qd2offset(qd);
1540 error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
1541 &alloc_required);
1542 if (error)
1543 goto out_i;
1544 if (alloc_required) {
1545 al = gfs2_alloc_get(ip);
1546 if (al == NULL)
1547 goto out_i;
1548 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1549 &data_blocks, &ind_blocks);
1550 blocks = al->al_requested = 1 + data_blocks + ind_blocks;
1551 error = gfs2_inplace_reserve(ip);
1552 if (error)
1553 goto out_alloc;
1554 }
1555
1556 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
1557 if (error)
1558 goto out_release;
1559
1560 /* Apply changes */
1561 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1562
1563 gfs2_trans_end(sdp);
1564out_release:
1565 if (alloc_required) {
1566 gfs2_inplace_release(ip);
1567out_alloc:
1568 gfs2_alloc_put(ip);
1569 }
1570out_i:
1571 gfs2_glock_dq_uninit(&i_gh);
1572out_q:
1573 gfs2_glock_dq_uninit(&q_gh);
1574out_put:
1575 mutex_unlock(&ip->i_inode.i_mutex);
1576 qd_put(qd);
1577 return error;
1578}
1579
1580const struct quotactl_ops gfs2_quotactl_ops = {
1581 .quota_sync = gfs2_quota_sync,
1582 .get_xstate = gfs2_quota_get_xstate,
1583 .get_xquota = gfs2_xquota_get,
1584 .set_xquota = gfs2_xquota_set,
1585};
1586
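The gfs2_quotactl_ops table above gives GFS2 the XFS-style quotactl interface (Q_XGETQSTAT, Q_XGETQUOTA and Q_XSETQLIM map onto get_xstate, get_xquota and set_xquota). For the table to take effect, the superblock has to point at it during mount; a minimal sketch of that hookup follows, where the fill-super function name is illustrative only (gfs2 performs the equivalent assignment in its own fill_super path, which is not part of this hunk):

	#include <linux/fs.h>
	#include <linux/quota.h>

	/* Sketch: expose the quotactl_ops defined above to quotactl(2). */
	static int example_fill_super(struct super_block *sb, void *data, int silent)
	{
		sb->s_qcop = &gfs2_quotactl_ops;	/* quotactl(2) now reaches the ops table */
		return 0;
	}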
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 0fa5fa63d0e8..e271fa07ad02 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
26 u32 uid, u32 gid); 26 u32 uid, u32 gid);
27 27
28extern int gfs2_quota_sync(struct gfs2_sbd *sdp); 28extern int gfs2_quota_sync(struct super_block *sb, int type);
29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); 29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
30 30
31extern int gfs2_quota_init(struct gfs2_sbd *sdp); 31extern int gfs2_quota_init(struct gfs2_sbd *sdp);
32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
33extern int gfs2_quotad(void *data); 33extern int gfs2_quotad(void *data);
34 34
35extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
36
35static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) 37static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
36{ 38{
37 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 39 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
50} 52}
51 53
52extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); 54extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
55extern const struct quotactl_ops gfs2_quotactl_ops;
53 56
54#endif /* __QUOTA_DOT_H__ */ 57#endif /* __QUOTA_DOT_H__ */
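The quota_send_warning() calls added to gfs2_quota_check() above complement the existing printk: the same hard- and soft-limit events are also emitted over the quota netlink socket, so userspace daemons can be notified without scraping the kernel log. Reduced to the call itself (the wrapper function is illustrative; the arguments mirror the ones used in the hunk):

	#include <linux/quota.h>

	/* Sketch: report a crossed block soft limit for one user or group. */
	static void example_warn(struct super_block *sb, int is_user, qid_t id)
	{
		quota_send_warning(is_user ? USRQUOTA : GRPQUOTA, id,
				   sb->s_dev, QUOTA_NL_BSOFTWARN);
	}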
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 59d2695509d3..4b9bece3d437 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -7,6 +7,7 @@
7 * of the GNU General Public License version 2. 7 * of the GNU General Public License version 2.
8 */ 8 */
9 9
10#include <linux/module.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/spinlock.h> 12#include <linux/spinlock.h>
12#include <linux/completion.h> 13#include <linux/completion.h>
@@ -409,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
409 memset(lh, 0, sizeof(struct gfs2_log_header)); 410 memset(lh, 0, sizeof(struct gfs2_log_header));
410 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 411 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
411 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 412 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
413 lh->lh_header.__pad0 = cpu_to_be64(0);
412 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 414 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
415 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
413 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); 416 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
414 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT); 417 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
415 lh->lh_blkno = cpu_to_be32(lblock); 418 lh->lh_blkno = cpu_to_be32(lblock);
@@ -593,6 +596,7 @@ fail:
593} 596}
594 597
595struct slow_work_ops gfs2_recover_ops = { 598struct slow_work_ops gfs2_recover_ops = {
599 .owner = THIS_MODULE,
596 .get_ref = gfs2_recover_get_ref, 600 .get_ref = gfs2_recover_get_ref,
597 .put_ref = gfs2_recover_put_ref, 601 .put_ref = gfs2_recover_put_ref,
598 .execute = gfs2_recover_work, 602 .execute = gfs2_recover_work,
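The new .owner field in gfs2_recover_ops lets the slow-work infrastructure pin the module for as long as a recovery item is queued or executing, closing a module-unload race. For context, queuing an item against such an ops table looks roughly like this (the work item and function names are illustrative):

	#include <linux/slow-work.h>

	static struct slow_work example_work;	/* illustrative item */

	static void example_queue(void)
	{
		slow_work_init(&example_work, &gfs2_recover_ops);
		if (slow_work_enqueue(&example_work) < 0)
			printk(KERN_WARNING "GFS2: recovery enqueue failed\n");
	}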
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8f1cfb02a6cb..0608f490c295 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1710{ 1710{
1711 struct gfs2_rgrpd *rgd; 1711 struct gfs2_rgrpd *rgd;
1712 struct gfs2_holder ri_gh, rgd_gh; 1712 struct gfs2_holder ri_gh, rgd_gh;
1713 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1714 int ri_locked = 0;
1713 int error; 1715 int error;
1714 1716
1715 error = gfs2_rindex_hold(sdp, &ri_gh); 1717 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
1716 if (error) 1718 error = gfs2_rindex_hold(sdp, &ri_gh);
1717 goto fail; 1719 if (error)
1720 goto fail;
1721 ri_locked = 1;
1722 }
1718 1723
1719 error = -EINVAL; 1724 error = -EINVAL;
1720 rgd = gfs2_blk2rgrpd(sdp, no_addr); 1725 rgd = gfs2_blk2rgrpd(sdp, no_addr);
@@ -1730,7 +1735,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1730 1735
1731 gfs2_glock_dq_uninit(&rgd_gh); 1736 gfs2_glock_dq_uninit(&rgd_gh);
1732fail_rindex: 1737fail_rindex:
1733 gfs2_glock_dq_uninit(&ri_gh); 1738 if (ri_locked)
1739 gfs2_glock_dq_uninit(&ri_gh);
1734fail: 1740fail:
1735 return error; 1741 return error;
1736} 1742}
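The rgrp.c change makes gfs2_check_blk_type() safe to call with the rindex glock already held: the lock is taken, and therefore released, only when gfs2_glock_is_locked_by_me() reports that the caller does not hold it. The conditional-locking skeleton, using the gfs2 helpers from the hunk above:

	/* Sketch: acquire the rindex glock only if the caller has not,
	 * and release only what we took ourselves. */
	static int example_with_rindex(struct gfs2_sbd *sdp, struct gfs2_inode *ip)
	{
		struct gfs2_holder ri_gh;
		int ri_locked = 0;
		int error = 0;

		if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
			error = gfs2_rindex_hold(sdp, &ri_gh);
			if (error)
				return error;
			ri_locked = 1;
		}
		/* ... work that needs the resource index ... */
		if (ri_locked)
			gfs2_glock_dq_uninit(&ri_gh);
		return error;
	}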
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0ec3ec672de1..c282ad41f3d1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -70,6 +70,11 @@ enum {
70 Opt_commit, 70 Opt_commit,
71 Opt_err_withdraw, 71 Opt_err_withdraw,
72 Opt_err_panic, 72 Opt_err_panic,
73 Opt_statfs_quantum,
74 Opt_statfs_percent,
75 Opt_quota_quantum,
76 Opt_barrier,
77 Opt_nobarrier,
73 Opt_error, 78 Opt_error,
74}; 79};
75 80
@@ -101,18 +106,23 @@ static const match_table_t tokens = {
101 {Opt_commit, "commit=%d"}, 106 {Opt_commit, "commit=%d"},
102 {Opt_err_withdraw, "errors=withdraw"}, 107 {Opt_err_withdraw, "errors=withdraw"},
103 {Opt_err_panic, "errors=panic"}, 108 {Opt_err_panic, "errors=panic"},
109 {Opt_statfs_quantum, "statfs_quantum=%d"},
110 {Opt_statfs_percent, "statfs_percent=%d"},
111 {Opt_quota_quantum, "quota_quantum=%d"},
112 {Opt_barrier, "barrier"},
113 {Opt_nobarrier, "nobarrier"},
104 {Opt_error, NULL} 114 {Opt_error, NULL}
105}; 115};
106 116
107/** 117/**
108 * gfs2_mount_args - Parse mount options 118 * gfs2_mount_args - Parse mount options
109 * @sdp: 119 * @args: The structure into which the parsed options will be written
110 * @data: 120 * @options: The options to parse
111 * 121 *
112 * Return: errno 122 * Return: errno
113 */ 123 */
114 124
115int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) 125int gfs2_mount_args(struct gfs2_args *args, char *options)
116{ 126{
117 char *o; 127 char *o;
118 int token; 128 int token;
@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
157 break; 167 break;
158 case Opt_debug: 168 case Opt_debug:
159 if (args->ar_errors == GFS2_ERRORS_PANIC) { 169 if (args->ar_errors == GFS2_ERRORS_PANIC) {
160 fs_info(sdp, "-o debug and -o errors=panic " 170 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
161 "are mutually exclusive.\n"); 171 "are mutually exclusive.\n");
162 return -EINVAL; 172 return -EINVAL;
163 } 173 }
@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
210 case Opt_commit: 220 case Opt_commit:
211 rv = match_int(&tmp[0], &args->ar_commit); 221 rv = match_int(&tmp[0], &args->ar_commit);
212 if (rv || args->ar_commit <= 0) { 222 if (rv || args->ar_commit <= 0) {
213 fs_info(sdp, "commit mount option requires a positive numeric argument\n"); 223 printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
224 return rv ? rv : -EINVAL;
225 }
226 break;
227 case Opt_statfs_quantum:
228 rv = match_int(&tmp[0], &args->ar_statfs_quantum);
229 if (rv || args->ar_statfs_quantum < 0) {
230 printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
231 return rv ? rv : -EINVAL;
232 }
233 break;
234 case Opt_quota_quantum:
235 rv = match_int(&tmp[0], &args->ar_quota_quantum);
236 if (rv || args->ar_quota_quantum <= 0) {
237 printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
238 return rv ? rv : -EINVAL;
239 }
240 break;
241 case Opt_statfs_percent:
242 rv = match_int(&tmp[0], &args->ar_statfs_percent);
243 if (rv || args->ar_statfs_percent < 0 ||
244 args->ar_statfs_percent > 100) {
245 printk(KERN_WARNING "GFS2: statfs_percent mount option requires a numeric argument between 0 and 100\n");
214 return rv ? rv : -EINVAL; 246 return rv ? rv : -EINVAL;
215 } 247 }
216 break; 248 break;
@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
219 break; 251 break;
220 case Opt_err_panic: 252 case Opt_err_panic:
221 if (args->ar_debug) { 253 if (args->ar_debug) {
222 fs_info(sdp, "-o debug and -o errors=panic " 254 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
223 "are mutually exclusive.\n"); 255 "are mutually exclusive.\n");
224 return -EINVAL; 256 return -EINVAL;
225 } 257 }
226 args->ar_errors = GFS2_ERRORS_PANIC; 258 args->ar_errors = GFS2_ERRORS_PANIC;
227 break; 259 break;
260 case Opt_barrier:
261 args->ar_nobarrier = 0;
262 break;
263 case Opt_nobarrier:
264 args->ar_nobarrier = 1;
265 break;
228 case Opt_error: 266 case Opt_error:
229 default: 267 default:
230 fs_info(sdp, "invalid mount option: %s\n", o); 268 printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
231 return -EINVAL; 269 return -EINVAL;
232 } 270 }
233 } 271 }
@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
442{ 480{
443 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 481 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
444 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 482 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
483 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
445 struct buffer_head *l_bh; 484 struct buffer_head *l_bh;
485 s64 x, y;
486 int need_sync = 0;
446 int error; 487 int error;
447 488
448 error = gfs2_meta_inode_buffer(l_ip, &l_bh); 489 error = gfs2_meta_inode_buffer(l_ip, &l_bh);
@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
456 l_sc->sc_free += free; 497 l_sc->sc_free += free;
457 l_sc->sc_dinodes += dinodes; 498 l_sc->sc_dinodes += dinodes;
458 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); 499 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
500 if (sdp->sd_args.ar_statfs_percent) {
501 x = 100 * l_sc->sc_free;
502 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
503 if (x >= y || x <= -y)
504 need_sync = 1;
505 }
459 spin_unlock(&sdp->sd_statfs_spin); 506 spin_unlock(&sdp->sd_statfs_spin);
460 507
461 brelse(l_bh); 508 brelse(l_bh);
509 if (need_sync)
510 gfs2_wake_up_statfs(sdp);
462} 511}
463 512
464void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 513void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
484 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); 533 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
485} 534}
486 535
487int gfs2_statfs_sync(struct gfs2_sbd *sdp) 536int gfs2_statfs_sync(struct super_block *sb, int type)
488{ 537{
538 struct gfs2_sbd *sdp = sb->s_fs_info;
489 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 539 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
490 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 540 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
491 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 541 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
521 goto out_bh2; 571 goto out_bh2;
522 572
523 update_statfs(sdp, m_bh, l_bh); 573 update_statfs(sdp, m_bh, l_bh);
574 sdp->sd_statfs_force_sync = 0;
524 575
525 gfs2_trans_end(sdp); 576 gfs2_trans_end(sdp);
526 577
@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
712 int error; 763 int error;
713 764
714 flush_workqueue(gfs2_delete_workqueue); 765 flush_workqueue(gfs2_delete_workqueue);
715 gfs2_quota_sync(sdp); 766 gfs2_quota_sync(sdp->sd_vfs, 0);
716 gfs2_statfs_sync(sdp); 767 gfs2_statfs_sync(sdp->sd_vfs, 0);
717 768
718 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, 769 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
719 &t_gh); 770 &t_gh);
@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1061 1112
1062 spin_lock(&gt->gt_spin); 1113 spin_lock(&gt->gt_spin);
1063 args.ar_commit = gt->gt_log_flush_secs; 1114 args.ar_commit = gt->gt_log_flush_secs;
1115 args.ar_quota_quantum = gt->gt_quota_quantum;
1116 if (gt->gt_statfs_slow)
1117 args.ar_statfs_quantum = 0;
1118 else
1119 args.ar_statfs_quantum = gt->gt_statfs_quantum;
1064 spin_unlock(&gt->gt_spin); 1120 spin_unlock(&gt->gt_spin);
1065 error = gfs2_mount_args(sdp, &args, data); 1121 error = gfs2_mount_args(&args, data);
1066 if (error) 1122 if (error)
1067 return error; 1123 return error;
1068 1124
@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1097 sb->s_flags |= MS_POSIXACL; 1153 sb->s_flags |= MS_POSIXACL;
1098 else 1154 else
1099 sb->s_flags &= ~MS_POSIXACL; 1155 sb->s_flags &= ~MS_POSIXACL;
1156 if (sdp->sd_args.ar_nobarrier)
1157 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1158 else
1159 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1100 spin_lock(&gt->gt_spin); 1160 spin_lock(&gt->gt_spin);
1101 gt->gt_log_flush_secs = args.ar_commit; 1161 gt->gt_log_flush_secs = args.ar_commit;
1162 gt->gt_quota_quantum = args.ar_quota_quantum;
1163 if (args.ar_statfs_quantum) {
1164 gt->gt_statfs_slow = 0;
1165 gt->gt_statfs_quantum = args.ar_statfs_quantum;
1166 }
1167 else {
1168 gt->gt_statfs_slow = 1;
1169 gt->gt_statfs_quantum = 30;
1170 }
1102 spin_unlock(&gt->gt_spin); 1171 spin_unlock(&gt->gt_spin);
1103 1172
1104 gfs2_online_uevent(sdp); 1173 gfs2_online_uevent(sdp);
@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1179{ 1248{
1180 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; 1249 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
1181 struct gfs2_args *args = &sdp->sd_args; 1250 struct gfs2_args *args = &sdp->sd_args;
1182 int lfsecs; 1251 int val;
1183 1252
1184 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir)) 1253 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
1185 seq_printf(s, ",meta"); 1254 seq_printf(s, ",meta");
@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1240 } 1309 }
1241 if (args->ar_discard) 1310 if (args->ar_discard)
1242 seq_printf(s, ",discard"); 1311 seq_printf(s, ",discard");
1243 lfsecs = sdp->sd_tune.gt_log_flush_secs; 1312 val = sdp->sd_tune.gt_log_flush_secs;
1244 if (lfsecs != 60) 1313 if (val != 60)
1245 seq_printf(s, ",commit=%d", lfsecs); 1314 seq_printf(s, ",commit=%d", val);
1315 val = sdp->sd_tune.gt_statfs_quantum;
1316 if (val != 30)
1317 seq_printf(s, ",statfs_quantum=%d", val);
1318 val = sdp->sd_tune.gt_quota_quantum;
1319 if (val != 60)
1320 seq_printf(s, ",quota_quantum=%d", val);
1321 if (args->ar_statfs_percent)
1322 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1246 if (args->ar_errors != GFS2_ERRORS_DEFAULT) { 1323 if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1247 const char *state; 1324 const char *state;
1248 1325
@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1259 } 1336 }
1260 seq_printf(s, ",errors=%s", state); 1337 seq_printf(s, ",errors=%s", state);
1261 } 1338 }
1339 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1340 seq_printf(s, ",nobarrier");
1341
1262 return 0; 1342 return 0;
1263} 1343}
1264 1344
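The five new mount options (statfs_quantum, statfs_percent, quota_quantum, barrier, nobarrier) ride on the kernel's generic option parser from <linux/parser.h>: a match_table_t maps option strings to tokens, strsep() splits the comma-separated option string, and match_int() extracts numeric arguments. A self-contained sketch of the same pattern, with the option name and variable being illustrative:

	#include <linux/parser.h>
	#include <linux/string.h>

	enum { Opt_example, Opt_err };

	static const match_table_t example_tokens = {
		{Opt_example, "example=%d"},
		{Opt_err, NULL}
	};

	static int example_parse(char *options, int *value)
	{
		substring_t args[MAX_OPT_ARGS];
		char *p;

		while ((p = strsep(&options, ",")) != NULL) {
			if (!*p)
				continue;
			switch (match_token(p, example_tokens, args)) {
			case Opt_example:
				if (match_int(&args[0], value))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
		return 0;
	}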
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 235db3682885..3df60f2d84e3 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
27 27
28extern void gfs2_jindex_free(struct gfs2_sbd *sdp); 28extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
29 29
30extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data); 30extern int gfs2_mount_args(struct gfs2_args *args, char *data);
31 31
32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); 32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); 33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
44 const void *buf); 44 const void *buf);
45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
46 struct buffer_head *l_bh); 46 struct buffer_head *l_bh);
47extern int gfs2_statfs_sync(struct gfs2_sbd *sdp); 47extern int gfs2_statfs_sync(struct super_block *sb, int type);
48 48
49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); 49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); 50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 446329728d52..c5dad1eb7b91 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
158 if (simple_strtol(buf, NULL, 0) != 1) 158 if (simple_strtol(buf, NULL, 0) != 1)
159 return -EINVAL; 159 return -EINVAL;
160 160
161 gfs2_statfs_sync(sdp); 161 gfs2_statfs_sync(sdp->sd_vfs, 0);
162 return len; 162 return len;
163} 163}
164 164
@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
171 if (simple_strtol(buf, NULL, 0) != 1) 171 if (simple_strtol(buf, NULL, 0) != 1)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 gfs2_quota_sync(sdp); 174 gfs2_quota_sync(sdp->sd_vfs, 0);
175 return len; 175 return len;
176} 176}
177 177
178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, 178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
179 size_t len) 179 size_t len)
180{ 180{
181 int error;
181 u32 id; 182 u32 id;
182 183
183 if (!capable(CAP_SYS_ADMIN)) 184 if (!capable(CAP_SYS_ADMIN))
@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
185 186
186 id = simple_strtoul(buf, NULL, 0); 187 id = simple_strtoul(buf, NULL, 0);
187 188
188 gfs2_quota_refresh(sdp, 1, id); 189 error = gfs2_quota_refresh(sdp, 1, id);
189 return len; 190 return error ? error : len;
190} 191}
191 192
192static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, 193static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
193 size_t len) 194 size_t len)
194{ 195{
196 int error;
195 u32 id; 197 u32 id;
196 198
197 if (!capable(CAP_SYS_ADMIN)) 199 if (!capable(CAP_SYS_ADMIN))
@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
199 201
200 id = simple_strtoul(buf, NULL, 0); 202 id = simple_strtoul(buf, NULL, 0);
201 203
202 gfs2_quota_refresh(sdp, 0, id); 204 error = gfs2_quota_refresh(sdp, 0, id);
203 return len; 205 return error ? error : len;
204} 206}
205 207
206static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 208static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8a0f8ef6ee27..912f5cbc4740 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -186,8 +186,8 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
186 return 0; 186 return 0;
187} 187}
188 188
189int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 189static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
190 struct gfs2_ea_location *el) 190 struct gfs2_ea_location *el)
191{ 191{
192 struct ea_find ef; 192 struct ea_find ef;
193 int error; 193 int error;
@@ -516,8 +516,8 @@ out:
516 return error; 516 return error;
517} 517}
518 518
519int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, 519static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
520 char *data, size_t size) 520 char *data, size_t size)
521{ 521{
522 int ret; 522 int ret;
523 size_t len = GFS2_EA_DATA_LEN(el->el_ea); 523 size_t len = GFS2_EA_DATA_LEN(el->el_ea);
@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
534 return len; 534 return len;
535} 535}
536 536
537int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
538{
539 struct gfs2_ea_location el;
540 int error;
541 int len;
542 char *data;
543
544 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
545 if (error)
546 return error;
547 if (!el.el_ea)
548 goto out;
549 if (!GFS2_EA_DATA_LEN(el.el_ea))
550 goto out;
551
552 len = GFS2_EA_DATA_LEN(el.el_ea);
553 data = kmalloc(len, GFP_NOFS);
554 error = -ENOMEM;
555 if (data == NULL)
556 goto out;
557
558 error = gfs2_ea_get_copy(ip, &el, data, len);
559 if (error == 0)
560 error = len;
561 *ppdata = data;
562out:
563 brelse(el.el_bh);
564 return error;
565}
566
537/** 567/**
538 * gfs2_xattr_get - Get a GFS2 extended attribute 568 * gfs2_xattr_get - Get a GFS2 extended attribute
539 * @inode: The inode 569 * @inode: The inode
@@ -1259,22 +1289,26 @@ fail:
1259 return error; 1289 return error;
1260} 1290}
1261 1291
1262int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, 1292int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
1263 struct iattr *attr, char *data)
1264{ 1293{
1294 struct gfs2_ea_location el;
1265 struct buffer_head *dibh; 1295 struct buffer_head *dibh;
1266 int error; 1296 int error;
1267 1297
1268 if (GFS2_EA_IS_STUFFED(el->el_ea)) { 1298 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
1299 if (error)
1300 return error;
1301
1302 if (GFS2_EA_IS_STUFFED(el.el_ea)) {
1269 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); 1303 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1270 if (error) 1304 if (error)
1271 return error; 1305 return error;
1272 1306
1273 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); 1307 gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
1274 memcpy(GFS2_EA2DATA(el->el_ea), data, 1308 memcpy(GFS2_EA2DATA(el.el_ea), data,
1275 GFS2_EA_DATA_LEN(el->el_ea)); 1309 GFS2_EA_DATA_LEN(el.el_ea));
1276 } else 1310 } else
1277 error = ea_acl_chmod_unstuffed(ip, el->el_ea, data); 1311 error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
1278 1312
1279 if (error) 1313 if (error)
1280 return error; 1314 return error;
@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
1507 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags); 1541 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
1508} 1542}
1509 1543
1510static int gfs2_xattr_system_get(struct inode *inode, const char *name,
1511 void *buffer, size_t size)
1512{
1513 return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
1514}
1515
1516static int gfs2_xattr_system_set(struct inode *inode, const char *name,
1517 const void *value, size_t size, int flags)
1518{
1519 return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
1520}
1521
1522static int gfs2_xattr_security_get(struct inode *inode, const char *name, 1544static int gfs2_xattr_security_get(struct inode *inode, const char *name,
1523 void *buffer, size_t size) 1545 void *buffer, size_t size)
1524{ 1546{
@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
1543 .set = gfs2_xattr_security_set, 1565 .set = gfs2_xattr_security_set,
1544}; 1566};
1545 1567
1546static struct xattr_handler gfs2_xattr_system_handler = {
1547 .prefix = XATTR_SYSTEM_PREFIX,
1548 .get = gfs2_xattr_system_get,
1549 .set = gfs2_xattr_system_set,
1550};
1551
1552struct xattr_handler *gfs2_xattr_handlers[] = { 1568struct xattr_handler *gfs2_xattr_handlers[] = {
1553 &gfs2_xattr_user_handler, 1569 &gfs2_xattr_user_handler,
1554 &gfs2_xattr_security_handler, 1570 &gfs2_xattr_security_handler,
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index cbdfd7743733..8d6ae5813c4d 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
62 62
63/* Exported to acl.c */ 63/* Exported to acl.c */
64 64
65extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 65extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
66 struct gfs2_ea_location *el); 66extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data);
67extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
68 char *data, size_t size);
69extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
70 struct iattr *attr, char *data);
71 67
72#endif /* __EATTR_DOT_H__ */ 68#endif /* __EATTR_DOT_H__ */
diff --git a/fs/inode.c b/fs/inode.c
index 4d8e3be55976..06c1f02de611 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -18,7 +18,6 @@
18#include <linux/hash.h> 18#include <linux/hash.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/security.h> 20#include <linux/security.h>
21#include <linux/ima.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/cdev.h> 22#include <linux/cdev.h>
24#include <linux/bootmem.h> 23#include <linux/bootmem.h>
@@ -157,11 +156,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
157 156
158 if (security_inode_alloc(inode)) 157 if (security_inode_alloc(inode))
159 goto out; 158 goto out;
160
161 /* allocate and initialize an i_integrity */
162 if (ima_inode_alloc(inode))
163 goto out_free_security;
164
165 spin_lock_init(&inode->i_lock); 159 spin_lock_init(&inode->i_lock);
166 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); 160 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
167 161
@@ -201,9 +195,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
201#endif 195#endif
202 196
203 return 0; 197 return 0;
204
205out_free_security:
206 security_inode_free(inode);
207out: 198out:
208 return -ENOMEM; 199 return -ENOMEM;
209} 200}
@@ -235,7 +226,6 @@ static struct inode *alloc_inode(struct super_block *sb)
235void __destroy_inode(struct inode *inode) 226void __destroy_inode(struct inode *inode)
236{ 227{
237 BUG_ON(inode_has_buffers(inode)); 228 BUG_ON(inode_has_buffers(inode));
238 ima_inode_free(inode);
239 security_inode_free(inode); 229 security_inode_free(inode);
240 fsnotify_inode_delete(inode); 230 fsnotify_inode_delete(inode);
241#ifdef CONFIG_FS_POSIX_ACL 231#ifdef CONFIG_FS_POSIX_ACL
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index f25e70c1b51c..f0294410868d 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -177,7 +177,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
177 spin_unlock(&jffs2_compressor_list_lock); 177 spin_unlock(&jffs2_compressor_list_lock);
178 break; 178 break;
179 default: 179 default:
180 printk(KERN_ERR "JFFS2: unknow compression mode.\n"); 180 printk(KERN_ERR "JFFS2: unknown compression mode.\n");
181 } 181 }
182 out: 182 out:
183 if (ret == JFFS2_COMPR_NONE) { 183 if (ret == JFFS2_COMPR_NONE) {
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index cfe05c1966a5..3f39be1b0455 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -164,12 +164,15 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
164 164
165 /* XXX FIXME: Where a single physical node actually shows up in two 165 /* XXX FIXME: Where a single physical node actually shows up in two
166 frags, we read it twice. Don't do that. */ 166 frags, we read it twice. Don't do that. */
167 /* Now we're pointing at the first frag which overlaps our page */ 167 /* Now we're pointing at the first frag which overlaps our page
168 * (or perhaps is before it, if we've been asked to read off the
169 * end of the file). */
168 while(offset < end) { 170 while(offset < end) {
169 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); 171 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
170 if (unlikely(!frag || frag->ofs > offset)) { 172 if (unlikely(!frag || frag->ofs > offset ||
173 frag->ofs + frag->size <= offset)) {
171 uint32_t holesize = end - offset; 174 uint32_t holesize = end - offset;
172 if (frag) { 175 if (frag && frag->ofs > offset) {
173 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); 176 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
174 holesize = min(holesize, frag->ofs - offset); 177 holesize = min(holesize, frag->ofs - offset);
175 } 178 }
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 1a80301004b8..378991cfe40f 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -931,7 +931,7 @@ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_re
931 * Helper function for jffs2_get_inode_nodes(). 931 * Helper function for jffs2_get_inode_nodes().
932 * The function detects whether more data should be read and reads it if yes. 932 * The function detects whether more data should be read and reads it if yes.
933 * 933 *
934 * Returns: 0 on succes; 934 * Returns: 0 on success;
935 * negative error code on failure. 935 * negative error code on failure.
936 */ 936 */
937static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, 937static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 082e844ab2db..4b107881acd5 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -31,7 +31,7 @@
31 * is used to release xattr name/value pair and detach from c->xattrindex. 31 * is used to release xattr name/value pair and detach from c->xattrindex.
32 * reclaim_xattr_datum(c) 32 * reclaim_xattr_datum(c)
33 * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when 33 * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
34 * memory usage by cache is over c->xdatum_mem_threshold. Currentry, this threshold 34 * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
35 * is hard coded as 32KiB. 35 * is hard coded as 32KiB.
36 * do_verify_xattr_datum(c, xd) 36 * do_verify_xattr_datum(c, xd)
37 * is used to load the xdatum informations without name/value pair from the medium. 37 * is used to load the xdatum informations without name/value pair from the medium.
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 2bc7d8aa5740..d9b031cf69f5 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -755,7 +755,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
755 * allocation group. 755 * allocation group.
756 */ 756 */
757 if ((blkno & (bmp->db_agsize - 1)) == 0) 757 if ((blkno & (bmp->db_agsize - 1)) == 0)
758 /* check if the AG is currenly being written to. 758 /* check if the AG is currently being written to.
759 * if so, call dbNextAG() to find a non-busy 759 * if so, call dbNextAG() to find a non-busy
760 * AG with sufficient free space. 760 * AG with sufficient free space.
761 */ 761 */
@@ -3337,7 +3337,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
3337 for (i = 0, n = 0; i < agno; n++) { 3337 for (i = 0, n = 0; i < agno; n++) {
3338 bmp->db_agfree[n] = 0; /* init collection point */ 3338 bmp->db_agfree[n] = 0; /* init collection point */
3339 3339
3340 /* coalesce cotiguous k AGs; */ 3340 /* coalesce contiguous k AGs; */
3341 for (j = 0; j < k && i < agno; j++, i++) { 3341 for (j = 0; j < k && i < agno; j++, i++) {
3342 /* merge AGi to AGn */ 3342 /* merge AGi to AGn */
3343 bmp->db_agfree[n] += bmp->db_agfree[i]; 3343 bmp->db_agfree[n] += bmp->db_agfree[i];
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 1a54ae14a192..e50cfa3d9654 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -371,82 +371,74 @@ EXPORT_SYMBOL_GPL(lockd_down);
371 371
372static ctl_table nlm_sysctls[] = { 372static ctl_table nlm_sysctls[] = {
373 { 373 {
374 .ctl_name = CTL_UNNUMBERED,
375 .procname = "nlm_grace_period", 374 .procname = "nlm_grace_period",
376 .data = &nlm_grace_period, 375 .data = &nlm_grace_period,
377 .maxlen = sizeof(unsigned long), 376 .maxlen = sizeof(unsigned long),
378 .mode = 0644, 377 .mode = 0644,
379 .proc_handler = &proc_doulongvec_minmax, 378 .proc_handler = proc_doulongvec_minmax,
380 .extra1 = (unsigned long *) &nlm_grace_period_min, 379 .extra1 = (unsigned long *) &nlm_grace_period_min,
381 .extra2 = (unsigned long *) &nlm_grace_period_max, 380 .extra2 = (unsigned long *) &nlm_grace_period_max,
382 }, 381 },
383 { 382 {
384 .ctl_name = CTL_UNNUMBERED,
385 .procname = "nlm_timeout", 383 .procname = "nlm_timeout",
386 .data = &nlm_timeout, 384 .data = &nlm_timeout,
387 .maxlen = sizeof(unsigned long), 385 .maxlen = sizeof(unsigned long),
388 .mode = 0644, 386 .mode = 0644,
389 .proc_handler = &proc_doulongvec_minmax, 387 .proc_handler = proc_doulongvec_minmax,
390 .extra1 = (unsigned long *) &nlm_timeout_min, 388 .extra1 = (unsigned long *) &nlm_timeout_min,
391 .extra2 = (unsigned long *) &nlm_timeout_max, 389 .extra2 = (unsigned long *) &nlm_timeout_max,
392 }, 390 },
393 { 391 {
394 .ctl_name = CTL_UNNUMBERED,
395 .procname = "nlm_udpport", 392 .procname = "nlm_udpport",
396 .data = &nlm_udpport, 393 .data = &nlm_udpport,
397 .maxlen = sizeof(int), 394 .maxlen = sizeof(int),
398 .mode = 0644, 395 .mode = 0644,
399 .proc_handler = &proc_dointvec_minmax, 396 .proc_handler = proc_dointvec_minmax,
400 .extra1 = (int *) &nlm_port_min, 397 .extra1 = (int *) &nlm_port_min,
401 .extra2 = (int *) &nlm_port_max, 398 .extra2 = (int *) &nlm_port_max,
402 }, 399 },
403 { 400 {
404 .ctl_name = CTL_UNNUMBERED,
405 .procname = "nlm_tcpport", 401 .procname = "nlm_tcpport",
406 .data = &nlm_tcpport, 402 .data = &nlm_tcpport,
407 .maxlen = sizeof(int), 403 .maxlen = sizeof(int),
408 .mode = 0644, 404 .mode = 0644,
409 .proc_handler = &proc_dointvec_minmax, 405 .proc_handler = proc_dointvec_minmax,
410 .extra1 = (int *) &nlm_port_min, 406 .extra1 = (int *) &nlm_port_min,
411 .extra2 = (int *) &nlm_port_max, 407 .extra2 = (int *) &nlm_port_max,
412 }, 408 },
413 { 409 {
414 .ctl_name = CTL_UNNUMBERED,
415 .procname = "nsm_use_hostnames", 410 .procname = "nsm_use_hostnames",
416 .data = &nsm_use_hostnames, 411 .data = &nsm_use_hostnames,
417 .maxlen = sizeof(int), 412 .maxlen = sizeof(int),
418 .mode = 0644, 413 .mode = 0644,
419 .proc_handler = &proc_dointvec, 414 .proc_handler = proc_dointvec,
420 }, 415 },
421 { 416 {
422 .ctl_name = CTL_UNNUMBERED,
423 .procname = "nsm_local_state", 417 .procname = "nsm_local_state",
424 .data = &nsm_local_state, 418 .data = &nsm_local_state,
425 .maxlen = sizeof(int), 419 .maxlen = sizeof(int),
426 .mode = 0644, 420 .mode = 0644,
427 .proc_handler = &proc_dointvec, 421 .proc_handler = proc_dointvec,
428 }, 422 },
429 { .ctl_name = 0 } 423 { }
430}; 424};
431 425
432static ctl_table nlm_sysctl_dir[] = { 426static ctl_table nlm_sysctl_dir[] = {
433 { 427 {
434 .ctl_name = CTL_UNNUMBERED,
435 .procname = "nfs", 428 .procname = "nfs",
436 .mode = 0555, 429 .mode = 0555,
437 .child = nlm_sysctls, 430 .child = nlm_sysctls,
438 }, 431 },
439 { .ctl_name = 0 } 432 { }
440}; 433};
441 434
442static ctl_table nlm_sysctl_root[] = { 435static ctl_table nlm_sysctl_root[] = {
443 { 436 {
444 .ctl_name = CTL_FS,
445 .procname = "fs", 437 .procname = "fs",
446 .mode = 0555, 438 .mode = 0555,
447 .child = nlm_sysctl_dir, 439 .child = nlm_sysctl_dir,
448 }, 440 },
449 { .ctl_name = 0 } 441 { }
450}; 442};
451 443
452#endif /* CONFIG_SYSCTL */ 444#endif /* CONFIG_SYSCTL */
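This lockd hunk is one instance of the tree-wide sysctl cleanup visible throughout this series: the binary .ctl_name numbers and per-entry .strategy callbacks are removed (the /proc interface is all that remains), proc_handler pointers lose the redundant address-of operator, and tables are terminated by an empty { } sentinel instead of { .ctl_name = 0 }. A minimal table in the new style, where the knob itself is illustrative:

	#include <linux/sysctl.h>

	static int example_knob;

	/* Registered with register_sysctl_table() in the usual way. */
	static ctl_table example_sysctls[] = {
		{
			.procname	= "example_knob",
			.data		= &example_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* all-zero terminator */
	};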
diff --git a/fs/namespace.c b/fs/namespace.c
index bdc3cb4fd222..7d70d63ceb29 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1921,6 +1921,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1921 if (data_page) 1921 if (data_page)
1922 ((char *)data_page)[PAGE_SIZE - 1] = 0; 1922 ((char *)data_page)[PAGE_SIZE - 1] = 0;
1923 1923
1924 /* ... and get the mountpoint */
1925 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
1926 if (retval)
1927 return retval;
1928
1929 retval = security_sb_mount(dev_name, &path,
1930 type_page, flags, data_page);
1931 if (retval)
1932 goto dput_out;
1933
1924 /* Default to relatime unless overridden */ 1934
1925 if (!(flags & MS_NOATIME)) 1935 if (!(flags & MS_NOATIME))
1926 mnt_flags |= MNT_RELATIME; 1936 mnt_flags |= MNT_RELATIME;
@@ -1945,16 +1955,6 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1945 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | 1955 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
1946 MS_STRICTATIME); 1956 MS_STRICTATIME);
1947 1957
1948 /* ... and get the mountpoint */
1949 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
1950 if (retval)
1951 return retval;
1952
1953 retval = security_sb_mount(dev_name, &path,
1954 type_page, flags, data_page);
1955 if (retval)
1956 goto dput_out;
1957
1958 if (flags & MS_REMOUNT) 1958 if (flags & MS_REMOUNT)
1959 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, 1959 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
1960 data_page); 1960 data_page);
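The namespace.c change moves the kern_path()/security_sb_mount() pair ahead of the block that translates MS_* mount flags into MNT_* per-mount flags, so the security hook now runs against the caller's unmodified flags word. For reference, resolving a mountpoint name the way do_mount() does (a sketch; error handling trimmed, and the function name is illustrative):

	#include <linux/namei.h>
	#include <linux/path.h>

	/* Sketch: resolve a directory name to a struct path. */
	static int example_lookup(const char *dir_name, struct path *path)
	{
		int retval = kern_path(dir_name, LOOKUP_FOLLOW, path);
		if (retval)
			return retval;
		/* caller must path_put(path) when done */
		return 0;
	}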
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 0d58caf4a6e1..ec8f45f12e05 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -835,7 +835,7 @@ static int ncp_ioctl_need_write(unsigned int cmd)
835 case NCP_IOC_SETROOT: 835 case NCP_IOC_SETROOT:
836 return 0; 836 return 0;
837 default: 837 default:
838 /* unkown IOCTL command, assume write */ 838 /* unknown IOCTL command, assume write */
839 return 1; 839 return 1;
840 } 840 }
841} 841}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 70fad69eb959..fa588006588d 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
359 359
360 BUG_ON(!cookie); 360 BUG_ON(!cookie);
361 361
362 if (fscache_check_page_write(cookie, page)) {
363 if (!(gfp & __GFP_WAIT))
364 return 0;
365 fscache_wait_on_page_write(cookie, page);
366 }
367
368 if (PageFsCache(page)) { 362 if (PageFsCache(page)) {
369 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", 363 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
370 cookie, page, nfsi); 364 cookie, page, nfsi);
371 365
372 fscache_uncache_page(cookie, page); 366 if (!fscache_maybe_release_page(cookie, page, gfp))
367 return 0;
368
373 nfs_add_fscache_stats(page->mapping->host, 369 nfs_add_fscache_stats(page->mapping->host,
374 NFSIOS_FSCACHE_PAGES_UNCACHED, 1); 370 NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
375 } 371 }
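The NFS change collapses the open-coded check, wait and uncache sequence into fscache_maybe_release_page(), which refuses the release (returns false) when the page is still pending storage and the gfp mask does not permit waiting. A sketch of a ->releasepage() built on it; the cookie-lookup helper is illustrative:

	#include <linux/fscache.h>
	#include <linux/pagemap.h>

	static int example_release_page(struct page *page, gfp_t gfp)
	{
		/* example_cookie() stands in for the filesystem's own lookup */
		struct fscache_cookie *cookie = example_cookie(page->mapping->host);

		if (PageFsCache(page) &&
		    !fscache_maybe_release_page(cookie, page, gfp))
			return 0;	/* still busy: the VM must not free it */
		return 1;		/* page may be released */
	}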
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index b62481dabae9..70e1fbbaaeab 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -22,63 +22,55 @@ static struct ctl_table_header *nfs_callback_sysctl_table;
22static ctl_table nfs_cb_sysctls[] = { 22static ctl_table nfs_cb_sysctls[] = {
23#ifdef CONFIG_NFS_V4 23#ifdef CONFIG_NFS_V4
24 { 24 {
25 .ctl_name = CTL_UNNUMBERED,
26 .procname = "nfs_callback_tcpport", 25 .procname = "nfs_callback_tcpport",
27 .data = &nfs_callback_set_tcpport, 26 .data = &nfs_callback_set_tcpport,
28 .maxlen = sizeof(int), 27 .maxlen = sizeof(int),
29 .mode = 0644, 28 .mode = 0644,
30 .proc_handler = &proc_dointvec_minmax, 29 .proc_handler = proc_dointvec_minmax,
31 .extra1 = (int *)&nfs_set_port_min, 30 .extra1 = (int *)&nfs_set_port_min,
32 .extra2 = (int *)&nfs_set_port_max, 31 .extra2 = (int *)&nfs_set_port_max,
33 }, 32 },
34 { 33 {
35 .ctl_name = CTL_UNNUMBERED,
36 .procname = "idmap_cache_timeout", 34 .procname = "idmap_cache_timeout",
37 .data = &nfs_idmap_cache_timeout, 35 .data = &nfs_idmap_cache_timeout,
38 .maxlen = sizeof(int), 36 .maxlen = sizeof(int),
39 .mode = 0644, 37 .mode = 0644,
40 .proc_handler = &proc_dointvec_jiffies, 38 .proc_handler = proc_dointvec_jiffies,
41 .strategy = &sysctl_jiffies,
42 }, 39 },
43#endif 40#endif
44 { 41 {
45 .ctl_name = CTL_UNNUMBERED,
46 .procname = "nfs_mountpoint_timeout", 42 .procname = "nfs_mountpoint_timeout",
47 .data = &nfs_mountpoint_expiry_timeout, 43 .data = &nfs_mountpoint_expiry_timeout,
48 .maxlen = sizeof(nfs_mountpoint_expiry_timeout), 44 .maxlen = sizeof(nfs_mountpoint_expiry_timeout),
49 .mode = 0644, 45 .mode = 0644,
50 .proc_handler = &proc_dointvec_jiffies, 46 .proc_handler = proc_dointvec_jiffies,
51 .strategy = &sysctl_jiffies,
52 }, 47 },
53 { 48 {
54 .ctl_name = CTL_UNNUMBERED,
55 .procname = "nfs_congestion_kb", 49 .procname = "nfs_congestion_kb",
56 .data = &nfs_congestion_kb, 50 .data = &nfs_congestion_kb,
57 .maxlen = sizeof(nfs_congestion_kb), 51 .maxlen = sizeof(nfs_congestion_kb),
58 .mode = 0644, 52 .mode = 0644,
59 .proc_handler = &proc_dointvec, 53 .proc_handler = proc_dointvec,
60 }, 54 },
61 { .ctl_name = 0 } 55 { }
62}; 56};
63 57
64static ctl_table nfs_cb_sysctl_dir[] = { 58static ctl_table nfs_cb_sysctl_dir[] = {
65 { 59 {
66 .ctl_name = CTL_UNNUMBERED,
67 .procname = "nfs", 60 .procname = "nfs",
68 .mode = 0555, 61 .mode = 0555,
69 .child = nfs_cb_sysctls, 62 .child = nfs_cb_sysctls,
70 }, 63 },
71 { .ctl_name = 0 } 64 { }
72}; 65};
73 66
74static ctl_table nfs_cb_sysctl_root[] = { 67static ctl_table nfs_cb_sysctl_root[] = {
75 { 68 {
76 .ctl_name = CTL_FS,
77 .procname = "fs", 69 .procname = "fs",
78 .mode = 0555, 70 .mode = 0555,
79 .child = nfs_cb_sysctl_dir, 71 .child = nfs_cb_sysctl_dir,
80 }, 72 },
81 { .ctl_name = 0 } 73 { }
82}; 74};
83 75
84int nfs_register_sysctl(void) 76int nfs_register_sysctl(void)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 53eb26c16b50..c84b5cc1a943 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -178,7 +178,7 @@ static int wb_priority(struct writeback_control *wbc)
178{ 178{
179 if (wbc->for_reclaim) 179 if (wbc->for_reclaim)
180 return FLUSH_HIGHPRI | FLUSH_STABLE; 180 return FLUSH_HIGHPRI | FLUSH_STABLE;
181 if (wbc->for_kupdate) 181 if (wbc->for_kupdate || wbc->for_background)
182 return FLUSH_LOWPRI; 182 return FLUSH_LOWPRI;
183 return 0; 183 return 0;
184} 184}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index dcd2040d330c..5ef5f365a5c8 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -69,36 +69,30 @@ static int zero;
 
 ctl_table inotify_table[] = {
 	{
-		.ctl_name = INOTIFY_MAX_USER_INSTANCES,
 		.procname = "max_user_instances",
 		.data = &inotify_max_user_instances,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
+		.proc_handler = proc_dointvec_minmax,
 		.extra1 = &zero,
 	},
 	{
-		.ctl_name = INOTIFY_MAX_USER_WATCHES,
 		.procname = "max_user_watches",
 		.data = &inotify_max_user_watches,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
+		.proc_handler = proc_dointvec_minmax,
 		.extra1 = &zero,
 	},
 	{
-		.ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
 		.procname = "max_queued_events",
 		.data = &inotify_max_queued_events,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
+		.proc_handler = proc_dointvec_minmax,
 		.extra1 = &zero
 	},
-	{ .ctl_name = 0 }
+	{ }
 };
 #endif /* CONFIG_SYSCTL */
 
@@ -747,10 +741,6 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
 
 	/* create/update an inode mark */
 	ret = inotify_update_watch(group, inode, mask);
-	if (unlikely(ret))
-		goto path_put_and_out;
-
-path_put_and_out:
 	path_put(&path);
 fput_and_out:
 	fput_light(filp, fput_needed);
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index 9669541d0119..08f7530e9341 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -927,7 +927,7 @@ lock_retry_remap:
 	return 0;
 
 	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
-			"EOVERFLOW" : (!err ? "EIO" : "unkown error"));
+			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
 	return err < 0 ? err : -EIO;
 
 read_err:
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 663c0e341f8b..43179ddd336f 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -399,7 +399,7 @@ static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
  * @cached_page: allocated but as yet unused page
  * @lru_pvec: lru-buffering pagevec of caller
  *
- * Obtain @nr_pages locked page cache pages from the mapping @maping and
+ * Obtain @nr_pages locked page cache pages from the mapping @mapping and
  * starting at index @index.
  *
  * If a page is newly created, increment its refcount and add it to the
@@ -1281,7 +1281,7 @@ rl_not_mapped_enoent:
 
 /*
  * Copy as much as we can into the pages and return the number of bytes which
- * were sucessfully copied.  If a fault is encountered then clear the pages
+ * were successfully copied.  If a fault is encountered then clear the pages
  * out to (ofs + bytes) and return the number of bytes which were copied.
  */
 static inline size_t ntfs_copy_from_user(struct page **pages,
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 89b02985c054..4dadcdf3d451 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -338,7 +338,7 @@ err_out:
  * copy of the complete multi sector transfer deprotected page.  On failure,
  * *@wrp is undefined.
  *
- * Simillarly, if @lsn is not NULL, on succes *@lsn will be set to the current
+ * Simillarly, if @lsn is not NULL, on success *@lsn will be set to the current
  * logfile lsn according to this restart page.  On failure, *@lsn is undefined.
  *
  * The following error codes are defined:
diff --git a/fs/ntfs/sysctl.c b/fs/ntfs/sysctl.c
index 9ef85e628fe1..79a89184cb5e 100644
--- a/fs/ntfs/sysctl.c
+++ b/fs/ntfs/sysctl.c
@@ -36,12 +36,11 @@
 /* Definition of the ntfs sysctl. */
 static ctl_table ntfs_sysctls[] = {
 	{
-		.ctl_name = CTL_UNNUMBERED,	/* Binary and text IDs. */
 		.procname = "ntfs-debug",
 		.data = &debug_msgs,		/* Data pointer and size. */
 		.maxlen = sizeof(debug_msgs),
 		.mode = 0644,			/* Mode, proc handler. */
-		.proc_handler = &proc_dointvec
+		.proc_handler = proc_dointvec
 	},
 	{}
 };
@@ -49,7 +48,6 @@ static ctl_table ntfs_sysctls[] = {
 /* Define the parent directory /proc/sys/fs. */
 static ctl_table sysctls_root[] = {
 	{
-		.ctl_name = CTL_FS,
 		.procname = "fs",
 		.mode = 0555,
 		.child = ntfs_sysctls
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 38a42f5d59ff..7c7198a5bc90 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -2398,7 +2398,7 @@ static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
  *
  * The array is assumed to be large enough to hold an entire path (tree depth).
  *
- * Upon succesful return from this function:
+ * Upon successful return from this function:
  *
  * - The 'right_path' array will contain a path to the leaf block
  *   whose range contains e_cpos.
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index a1163b8b417c..b7428c5d0d3b 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -47,7 +47,7 @@
  * Calculate the bit offset in the hamming code buffer based on the bit's
  * offset in the data buffer.  Since the hamming code reserves all
  * power-of-two bits for parity, the data bit number and the code bit
- * number are offest by all the parity bits beforehand.
+ * number are offset by all the parity bits beforehand.
  *
  * Recall that bit numbers in hamming code are 1-based.  This function
  * takes the 0-based data bit from the caller.
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index da794bc07a6c..a3f150e52b02 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -294,10 +294,10 @@ static int sc_seq_show(struct seq_file *seq, void *v)
 	if (sc->sc_sock) {
 		inet = inet_sk(sc->sc_sock->sk);
 		/* the stack's structs aren't sparse endian clean */
-		saddr = (__force __be32)inet->saddr;
-		daddr = (__force __be32)inet->daddr;
-		sport = (__force __be16)inet->sport;
-		dport = (__force __be16)inet->dport;
+		saddr = (__force __be32)inet->inet_saddr;
+		daddr = (__force __be32)inet->inet_daddr;
+		sport = (__force __be16)inet->inet_sport;
+		dport = (__force __be16)inet->inet_dport;
 	}
 
 	/* XXX sigh, inet-> doesn't have sparse annotation so any
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 83bcaf266b35..03ccf9a7b1f4 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2586,7 +2586,7 @@ fail:
 	 * is complete everywhere.  if the target dies while this is
 	 * going on, some nodes could potentially see the target as the
 	 * master, so it is important that my recovery finds the migration
-	 * mle and sets the master to UNKNONWN. */
+	 * mle and sets the master to UNKNOWN. */
 
 
 	/* wait for new node to assert master */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 0d38d67194cb..c5e4a49e3a12 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1855,7 +1855,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
 		 * outstanding lock request, so a cancel convert is
 		 * required.  We intentionally overwrite 'ret' - if the
 		 * cancel fails and the lock was granted, it's easier
-		 * to just bubble sucess back up to the user.
+		 * to just bubble success back up to the user.
 		 */
 		ret = ocfs2_flock_handle_signal(lockres, level);
 	} else if (!ret && (level > lockres->l_level)) {
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 89fc8ee1f5a5..de059f490586 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1712,7 +1712,8 @@ int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
 	struct super_block *sb = inode->i_sb;
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
-	    !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
+	    !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
+	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
 		return 0;
 
 	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 54c16b66327e..bf34c491ae96 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -659,7 +659,7 @@ static int __ocfs2_journal_access(handle_t *handle,
 
 	default:
 		status = -EINVAL;
-		mlog(ML_ERROR, "Uknown access type!\n");
+		mlog(ML_ERROR, "Unknown access type!\n");
 	}
 	if (!status && ocfs2_meta_ecc(osb) && triggers)
 		jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index eae404602424..d963d8638709 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -35,12 +35,7 @@
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/lockdep.h>
-#ifndef CONFIG_OCFS2_COMPAT_JBD
-# include <linux/jbd2.h>
-#else
-# include <linux/jbd.h>
-# include "ocfs2_jbd_compat.h"
-#endif
+#include <linux/jbd2.h>
 
 /* For union ocfs2_dlm_lksb */
 #include "stackglue.h"
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 60287fc56bcb..30967e3f5e43 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2431,7 +2431,7 @@ out:
  * we gonna touch and whether we need to create new blocks.
  *
  * Normally the refcount blocks store these refcount should be
- * continguous also, so that we can get the number easily.
+ * contiguous also, so that we can get the number easily.
  * As for meta_ac, we will at most add split 2 refcount record and
  * 2 more refcount block, so just check it in a rough way.
  *
@@ -3743,6 +3743,9 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
 		goto out;
 	}
 
+	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+		goto attach_xattr;
+
 	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
 
 	size = i_size_read(inode);
@@ -3769,6 +3772,7 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
 		cpos += num_clusters;
 	}
 
+attach_xattr:
 	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
 		ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
 						       &ref_tree->rf_ci,
@@ -3858,6 +3862,49 @@ out:
 	return ret;
 }
 
+static int ocfs2_duplicate_inline_data(struct inode *s_inode,
+				       struct buffer_head *s_bh,
+				       struct inode *t_inode,
+				       struct buffer_head *t_bh)
+{
+	int ret;
+	handle_t *handle;
+	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
+	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
+	struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
+
+	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
+
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
+	memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
+	       le16_to_cpu(s_di->id2.i_data.id_count));
+	spin_lock(&OCFS2_I(t_inode)->ip_lock);
+	OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
+	t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
+	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
+
+	ocfs2_journal_dirty(handle, t_bh);
+
+out_commit:
+	ocfs2_commit_trans(osb, handle);
+out:
+	return ret;
+}
+
 static int ocfs2_duplicate_extent_list(struct inode *s_inode,
 				       struct inode *t_inode,
 				       struct buffer_head *t_bh,
@@ -3997,6 +4044,14 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
 		goto out;
 	}
 
+	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
+						  t_inode, t_bh);
+		if (ret)
+			mlog_errno(ret);
+		goto out;
+	}
+
 	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
 				       1, &ref_tree, &ref_root_bh);
 	if (ret) {
@@ -4013,10 +4068,6 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
 		goto out_unlock_refcount;
 	}
 
-	ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve);
-	if (ret)
-		mlog_errno(ret);
-
 out_unlock_refcount:
 	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 	brelse(ref_root_bh);
@@ -4068,9 +4119,17 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
 		ret = ocfs2_reflink_xattrs(inode, old_bh,
 					   new_inode, new_bh,
 					   preserve);
-		if (ret)
+		if (ret) {
 			mlog_errno(ret);
+			goto inode_unlock;
+		}
 	}
+
+	ret = ocfs2_complete_reflink(inode, old_bh,
+				     new_inode, new_bh, preserve);
+	if (ret)
+		mlog_errno(ret);
+
 inode_unlock:
 	ocfs2_inode_unlock(new_inode, 1);
 	brelse(new_bh);
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 3f2f1c45b7b6..f3df0baa9a48 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -620,51 +620,46 @@ error:
 
 static ctl_table ocfs2_nm_table[] = {
 	{
-		.ctl_name = 1,
 		.procname = "hb_ctl_path",
 		.data = ocfs2_hb_ctl_path,
 		.maxlen = OCFS2_MAX_HB_CTL_PATH,
 		.mode = 0644,
-		.proc_handler = &proc_dostring,
-		.strategy = &sysctl_string,
+		.proc_handler = proc_dostring,
 	},
-	{ .ctl_name = 0 }
+	{ }
 };
 
 static ctl_table ocfs2_mod_table[] = {
 	{
-		.ctl_name = FS_OCFS2_NM,
 		.procname = "nm",
 		.data = NULL,
 		.maxlen = 0,
 		.mode = 0555,
 		.child = ocfs2_nm_table
 	},
-	{ .ctl_name = 0}
+	{ }
 };
 
 static ctl_table ocfs2_kern_table[] = {
 	{
-		.ctl_name = FS_OCFS2,
 		.procname = "ocfs2",
 		.data = NULL,
 		.maxlen = 0,
 		.mode = 0555,
 		.child = ocfs2_mod_table
 	},
-	{ .ctl_name = 0}
+	{ }
 };
 
 static ctl_table ocfs2_root_table[] = {
 	{
-		.ctl_name = CTL_FS,
 		.procname = "fs",
 		.data = NULL,
 		.maxlen = 0,
 		.mode = 0555,
 		.child = ocfs2_kern_table
 	},
-	{ .ctl_name = 0 }
+	{ }
 };
 
 static struct ctl_table_header *ocfs2_table_header = NULL;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c0e48aeebb1c..14f47d2bfe02 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -773,18 +773,20 @@ static int ocfs2_sb_probe(struct super_block *sb,
 		if (tmpstat < 0) {
 			status = tmpstat;
 			mlog_errno(status);
-			goto bail;
+			break;
 		}
 		di = (struct ocfs2_dinode *) (*bh)->b_data;
 		memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
 		spin_lock_init(&stats->b_lock);
-		status = ocfs2_verify_volume(di, *bh, blksize, stats);
-		if (status >= 0)
-			goto bail;
-		brelse(*bh);
-		*bh = NULL;
-		if (status != -EAGAIN)
+		tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats);
+		if (tmpstat < 0) {
+			brelse(*bh);
+			*bh = NULL;
+		}
+		if (tmpstat != -EAGAIN) {
+			status = tmpstat;
 			break;
+		}
 	}
 
 bail:
@@ -1645,6 +1647,10 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_bavail = buf->f_bfree;
 	buf->f_files = numbits;
 	buf->f_ffree = freebits;
+	buf->f_fsid.val[0] = crc32_le(0, osb->uuid_str, OCFS2_VOL_UUID_LEN)
+				& 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = crc32_le(0, osb->uuid_str + OCFS2_VOL_UUID_LEN,
+				OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL;
 
 	brelse(bh);
 
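The f_fsid lines added above derive a stable filesystem id by hashing the two halves of the volume UUID string independently, so the two 32-bit words differ. A condensed sketch of that pattern, assuming only crc32_le() from <linux/crc32.h>; the helper name and "uuid" parameter are illustrative:

#include <linux/crc32.h>
#include <linux/statfs.h>

/* fill f_fsid from a UUID buffer of 2*len bytes, as in the hunk above */
static void fill_fsid_sketch(struct kstatfs *buf,
			     const unsigned char *uuid, unsigned len)
{
	/* hash each half separately so val[0] and val[1] are distinct */
	buf->f_fsid.val[0] = crc32_le(0, uuid, len) & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = crc32_le(0, uuid + len, len) & 0xFFFFFFFFUL;
}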
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index b6284f235d2f..c61369342a27 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -53,11 +53,6 @@
 #include <linux/highmem.h>
 #include <linux/buffer_head.h>
 #include <linux/rbtree.h>
-#ifndef CONFIG_OCFS2_COMPAT_JBD
-# include <linux/jbd2.h>
-#else
-# include <linux/jbd.h>
-#endif
 
 #define MLOG_MASK_PREFIX ML_UPTODATE
 
diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c
index e1c0ec0ae989..082234581d05 100644
--- a/fs/omfs/bitmap.c
+++ b/fs/omfs/bitmap.c
@@ -85,7 +85,7 @@ out:
 }
 
 /*
- * Tries to allocate exactly one block.  Returns true if sucessful.
+ * Tries to allocate exactly one block.  Returns true if successful.
  */
 int omfs_allocate_block(struct super_block *sb, u64 block)
 {
diff --git a/fs/open.c b/fs/open.c
index 4f01e06227c6..b4b31d277f3a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -587,6 +587,9 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
 	error = -EPERM;
 	if (!capable(CAP_SYS_CHROOT))
 		goto dput_and_out;
+	error = security_path_chroot(&path);
+	if (error)
+		goto dput_and_out;
 
 	set_fs_root(current->fs, &path);
 	error = 0;
@@ -617,11 +620,15 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
 	if (err)
 		goto out_putf;
 	mutex_lock(&inode->i_mutex);
+	err = security_path_chmod(dentry, file->f_vfsmnt, mode);
+	if (err)
+		goto out_unlock;
 	if (mode == (mode_t) -1)
 		mode = inode->i_mode;
 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
 	err = notify_change(dentry, &newattrs);
+out_unlock:
 	mutex_unlock(&inode->i_mutex);
 	mnt_drop_write(file->f_path.mnt);
 out_putf:
@@ -646,11 +653,15 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
 	if (error)
 		goto dput_and_out;
 	mutex_lock(&inode->i_mutex);
+	error = security_path_chmod(path.dentry, path.mnt, mode);
+	if (error)
+		goto out_unlock;
 	if (mode == (mode_t) -1)
 		mode = inode->i_mode;
 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
 	error = notify_change(path.dentry, &newattrs);
+out_unlock:
 	mutex_unlock(&inode->i_mutex);
 	mnt_drop_write(path.mnt);
 dput_and_out:
@@ -664,9 +675,9 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
 	return sys_fchmodat(AT_FDCWD, filename, mode);
 }
 
-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
+static int chown_common(struct path *path, uid_t user, gid_t group)
 {
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = path->dentry->d_inode;
 	int error;
 	struct iattr newattrs;
 
@@ -683,7 +694,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
 		newattrs.ia_valid |=
 			ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
 	mutex_lock(&inode->i_mutex);
-	error = notify_change(dentry, &newattrs);
+	error = security_path_chown(path, user, group);
+	if (!error)
+		error = notify_change(path->dentry, &newattrs);
 	mutex_unlock(&inode->i_mutex);
 
 	return error;
@@ -700,7 +713,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
 	error = mnt_want_write(path.mnt);
 	if (error)
 		goto out_release;
-	error = chown_common(path.dentry, user, group);
+	error = chown_common(&path, user, group);
 	mnt_drop_write(path.mnt);
 out_release:
 	path_put(&path);
@@ -725,7 +738,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
 	error = mnt_want_write(path.mnt);
 	if (error)
 		goto out_release;
-	error = chown_common(path.dentry, user, group);
+	error = chown_common(&path, user, group);
 	mnt_drop_write(path.mnt);
 out_release:
 	path_put(&path);
@@ -744,7 +757,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
 	error = mnt_want_write(path.mnt);
 	if (error)
 		goto out_release;
-	error = chown_common(path.dentry, user, group);
+	error = chown_common(&path, user, group);
 	mnt_drop_write(path.mnt);
 out_release:
 	path_put(&path);
@@ -767,7 +780,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
 		goto out_fput;
 	dentry = file->f_path.dentry;
 	audit_inode(NULL, dentry);
-	error = chown_common(dentry, user, group);
+	error = chown_common(&file->f_path, user, group);
 	mnt_drop_write(file->f_path.mnt);
 out_fput:
 	fput(file);
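The open.c hunks all follow one idea: pass the whole struct path (dentry plus vfsmount) down so the security_path_* hook can see which mount the change targets, and run that hook before notify_change() while i_mutex is held. A condensed, illustrative caller under those assumptions; do_chown_sketch is not a function from the patch:

/* mirror of the sys_chown() call order after the change above */
static int do_chown_sketch(struct path *path, uid_t user, gid_t group)
{
	int error = mnt_want_write(path->mnt);	/* write access on the mount */
	if (error)
		return error;
	/* chown_common() now runs security_path_chown() inside i_mutex */
	error = chown_common(path, user, group);
	mnt_drop_write(path->mnt);
	return error;
}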
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 7b685e10cbad..64bc8998ac9a 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -226,6 +226,13 @@ ssize_t part_alignment_offset_show(struct device *dev,
 	return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
 }
 
+ssize_t part_discard_alignment_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct hd_struct *p = dev_to_part(dev);
+	return sprintf(buf, "%u\n", p->discard_alignment);
+}
+
 ssize_t part_stat_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
@@ -288,6 +295,8 @@ static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
 static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
+static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show,
+		   NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
 static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -300,6 +309,7 @@ static struct attribute *part_attrs[] = {
 	&dev_attr_start.attr,
 	&dev_attr_size.attr,
 	&dev_attr_alignment_offset.attr,
+	&dev_attr_discard_alignment.attr,
 	&dev_attr_stat.attr,
 	&dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -403,6 +413,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 
 	p->start_sect = start;
 	p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
+	p->discard_alignment = queue_sector_discard_alignment(disk->queue,
+							      start);
 	p->nr_sects = len;
 	p->partno = partno;
 	p->policy = get_disk_ro(disk);
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 038a6022152f..49cfd5f54238 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -1,7 +1,9 @@
 /************************************************************
  * EFI GUID Partition Table handling
- * Per Intel EFI Specification v1.02
- * http://developer.intel.com/technology/efi/efi.htm
+ *
+ * http://www.uefi.org/specs/
+ * http://www.intel.com/technology/efi/
+ *
  * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
  *   Copyright 2000,2001,2002,2004 Dell Inc.
  *
@@ -92,6 +94,7 @@
  *
  ************************************************************/
 #include <linux/crc32.h>
+#include <linux/math64.h>
 #include "check.h"
 #include "efi.h"
 
@@ -141,7 +144,8 @@ last_lba(struct block_device *bdev)
 {
 	if (!bdev || !bdev->bd_inode)
 		return 0;
-	return (bdev->bd_inode->i_size >> 9) - 1ULL;
+	return div_u64(bdev->bd_inode->i_size,
+		       bdev_logical_block_size(bdev)) - 1ULL;
 }
 
 static inline int
@@ -188,6 +192,7 @@ static size_t
 read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
 {
 	size_t totalreadcount = 0;
+	sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
 
 	if (!bdev || !buffer || lba > last_lba(bdev))
 		return 0;
@@ -195,7 +200,7 @@ read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
 	while (count) {
 		int copied = 512;
 		Sector sect;
-		unsigned char *data = read_dev_sector(bdev, lba++, &sect);
+		unsigned char *data = read_dev_sector(bdev, n++, &sect);
 		if (!data)
 			break;
 		if (copied > count)
@@ -257,15 +262,16 @@ static gpt_header *
 alloc_read_gpt_header(struct block_device *bdev, u64 lba)
 {
 	gpt_header *gpt;
+	unsigned ssz = bdev_logical_block_size(bdev);
+
 	if (!bdev)
 		return NULL;
 
-	gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL);
+	gpt = kzalloc(ssz, GFP_KERNEL);
 	if (!gpt)
 		return NULL;
 
-	if (read_lba(bdev, lba, (u8 *) gpt,
-		     sizeof (gpt_header)) < sizeof (gpt_header)) {
+	if (read_lba(bdev, lba, (u8 *) gpt, ssz) < ssz) {
 		kfree(gpt);
 		gpt=NULL;
 		return NULL;
@@ -601,6 +607,7 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev)
 	gpt_header *gpt = NULL;
 	gpt_entry *ptes = NULL;
 	u32 i;
+	unsigned ssz = bdev_logical_block_size(bdev) / 512;
 
 	if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
 		kfree(gpt);
@@ -611,13 +618,14 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev)
 	pr_debug("GUID Partition Table is valid!  Yea!\n");
 
 	for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
+		u64 start = le64_to_cpu(ptes[i].starting_lba);
+		u64 size = le64_to_cpu(ptes[i].ending_lba) -
+			   le64_to_cpu(ptes[i].starting_lba) + 1ULL;
+
 		if (!is_pte_valid(&ptes[i], last_lba(bdev)))
 			continue;
 
-		put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba),
-			      (le64_to_cpu(ptes[i].ending_lba) -
-			       le64_to_cpu(ptes[i].starting_lba) +
-			       1ULL));
+		put_partition(state, i+1, start * ssz, size * ssz);
 
 		/* If this is a RAID volume, tell md */
 		if (!efi_guidcmp(ptes[i].partition_type_guid,
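The efi.c hunks above all serve one unit conversion: GPT LBAs are counted in the device's logical block size, while read_dev_sector() and put_partition() work in fixed 512-byte sectors, so every LBA gets scaled by (logical block size / 512) and divisions use div_u64() to stay safe on 32-bit builds. A small sketch of that arithmetic under those assumptions; both helper names are illustrative:

#include <linux/math64.h>

/* last addressable LBA given the device size in bytes */
static u64 last_lba_sketch(u64 device_bytes, unsigned logical_block_size)
{
	/* div_u64() avoids an open-coded 64-by-32 division on 32-bit */
	return div_u64(device_bytes, logical_block_size) - 1ULL;
}

/* convert a GPT LBA into the 512-byte sector units the block layer uses */
static u64 lba_to_512_sectors(u64 lba, unsigned logical_block_size)
{
	return lba * (logical_block_size / 512);
}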
diff --git a/fs/partitions/efi.h b/fs/partitions/efi.h
index 2cc89d0475bf..6998b589abf9 100644
--- a/fs/partitions/efi.h
+++ b/fs/partitions/efi.h
@@ -37,7 +37,6 @@
 #define EFI_PMBR_OSTYPE_EFI 0xEF
 #define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
 
-#define GPT_BLOCK_SIZE 512
 #define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
 #define GPT_HEADER_REVISION_V1 0x00010000
 #define GPT_PRIMARY_PARTITION_TABLE_LBA 1
@@ -79,7 +78,12 @@ typedef struct _gpt_header {
 	__le32 num_partition_entries;
 	__le32 sizeof_partition_entry;
 	__le32 partition_entry_array_crc32;
-	u8 reserved2[GPT_BLOCK_SIZE - 92];
+
+	/* The rest of the logical block is reserved by UEFI and must be zero.
+	 * EFI standard handles this by:
+	 *
+	 * uint8_t		reserved2[ BlockSize - 92 ];
+	 */
 } __attribute__ ((packed)) gpt_header;
 
 typedef struct _gpt_entry_attributes {
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 822c2d506518..4badde179b18 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -410,6 +410,16 @@ static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
 }
 #endif /* CONFIG_MMU */
 
+static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+{
+	seq_printf(m, "Cpus_allowed:\t");
+	seq_cpumask(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_cpumask_list(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+}
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
 {
@@ -424,6 +434,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	}
 	task_sig(m, task);
 	task_cap(m, task);
+	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 #if defined(CONFIG_S390)
 	task_show_regs(m, task);
@@ -495,20 +506,17 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	/* add up live thread stats at the group level */
 	if (whole) {
-		struct task_cputime cputime;
 		struct task_struct *t = task;
 		do {
 			min_flt += t->min_flt;
 			maj_flt += t->maj_flt;
-			gtime = cputime_add(gtime, task_gtime(t));
+			gtime = cputime_add(gtime, t->gtime);
 			t = next_thread(t);
 		} while (t != task);
 
 		min_flt += sig->min_flt;
 		maj_flt += sig->maj_flt;
-		thread_group_cputime(task, &cputime);
-		utime = cputime.utime;
-		stime = cputime.stime;
+		thread_group_times(task, &utime, &stime);
 		gtime = cputime_add(gtime, sig->gtime);
 	}
 
@@ -524,9 +532,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		utime = task_utime(task);
-		stime = task_stime(task);
-		gtime = task_gtime(task);
+		task_times(task, &utime, &stime);
+		gtime = task->gtime;
 	}
 
 	/* scale priority and nice values from timeslices to -20..20 */
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f667e8aeabdf..6ff9981f0a18 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -48,7 +48,7 @@ out:
 static struct ctl_table *find_in_table(struct ctl_table *p, struct qstr *name)
 {
 	int len;
-	for ( ; p->ctl_name || p->procname; p++) {
+	for ( ; p->procname; p++) {
 
 		if (!p->procname)
 			continue;
@@ -218,7 +218,7 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
 		void *dirent, filldir_t filldir)
 {
 
-	for (; table->ctl_name || table->procname; table++, (*pos)++) {
+	for (; table->procname; table++, (*pos)++) {
 		int res;
 
 		/* Can't do anything without a proc name */
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 7cc726c6d70a..b9b7aad2003d 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -27,7 +27,7 @@ static int show_stat(struct seq_file *p, void *v)
 	int i, j;
 	unsigned long jif;
 	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
-	cputime64_t guest;
+	cputime64_t guest, guest_nice;
 	u64 sum = 0;
 	u64 sum_softirq = 0;
 	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
@@ -36,7 +36,7 @@ static int show_stat(struct seq_file *p, void *v)
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;
-	guest = cputime64_zero;
+	guest = guest_nice = cputime64_zero;
 	getboottime(&boottime);
 	jif = boottime.tv_sec;
 
@@ -51,6 +51,8 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
 		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
+		guest_nice = cputime64_add(guest_nice,
+			kstat_cpu(i).cpustat.guest_nice);
 		for_each_irq_nr(j) {
 			sum += kstat_irqs_cpu(j, i);
 		}
@@ -65,7 +67,8 @@ static int show_stat(struct seq_file *p, void *v)
 	}
 	sum += arch_irq_stat();
 
-	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+		"%llu\n",
 		(unsigned long long)cputime64_to_clock_t(user),
 		(unsigned long long)cputime64_to_clock_t(nice),
 		(unsigned long long)cputime64_to_clock_t(system),
@@ -74,7 +77,8 @@ static int show_stat(struct seq_file *p, void *v)
 		(unsigned long long)cputime64_to_clock_t(irq),
 		(unsigned long long)cputime64_to_clock_t(softirq),
 		(unsigned long long)cputime64_to_clock_t(steal),
-		(unsigned long long)cputime64_to_clock_t(guest));
+		(unsigned long long)cputime64_to_clock_t(guest),
+		(unsigned long long)cputime64_to_clock_t(guest_nice));
 	for_each_online_cpu(i) {
 
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
@@ -88,8 +92,10 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = kstat_cpu(i).cpustat.softirq;
 		steal = kstat_cpu(i).cpustat.steal;
 		guest = kstat_cpu(i).cpustat.guest;
+		guest_nice = kstat_cpu(i).cpustat.guest_nice;
 		seq_printf(p,
-			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+			"%llu\n",
 			i,
 			(unsigned long long)cputime64_to_clock_t(user),
 			(unsigned long long)cputime64_to_clock_t(nice),
@@ -99,7 +105,8 @@ static int show_stat(struct seq_file *p, void *v)
 			(unsigned long long)cputime64_to_clock_t(irq),
 			(unsigned long long)cputime64_to_clock_t(softirq),
 			(unsigned long long)cputime64_to_clock_t(steal),
-			(unsigned long long)cputime64_to_clock_t(guest));
+			(unsigned long long)cputime64_to_clock_t(guest),
+			(unsigned long long)cputime64_to_clock_t(guest_nice));
 	}
 	seq_printf(p, "intr %llu", (unsigned long long)sum);
 
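With the change above, every "cpu" line in /proc/stat carries a tenth counter, guest_nice, appended after guest. A minimal userspace sketch reading it, assuming only the standard C library; it is illustrative, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned long long v[10];	/* user..guest_nice, in clock ticks */
	char cpu[8];
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	/* the aggregate "cpu" line now has ten numeric fields */
	if (fscanf(f, "%7s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   cpu, &v[0], &v[1], &v[2], &v[3], &v[4],
		   &v[5], &v[6], &v[7], &v[8], &v[9]) == 11)
		printf("guest_nice = %llu\n", v[9]);
	fclose(f);
	return 0;
}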
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 0afba069d567..32f5d131a644 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -67,7 +67,7 @@ unsigned long qnx4_count_free_blocks(struct super_block *sb)
 
 	while (total < size) {
 		if ((bh = sb_bread(sb, start + offset)) == NULL) {
-			printk("qnx4: I/O error in counting free blocks\n");
+			printk(KERN_ERR "qnx4: I/O error in counting free blocks\n");
 			break;
 		}
 		count_bits(bh->b_data, size - total, &total_free);
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 86cc39cb1398..6f30c3d5bcbf 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -26,8 +26,8 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	int ix, ino;
 	int size;
 
-	QNX4DEBUG(("qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
-	QNX4DEBUG(("filp->f_pos = %ld\n", (long) filp->f_pos));
+	QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
+	QNX4DEBUG((KERN_INFO "filp->f_pos = %ld\n", (long) filp->f_pos));
 
 	lock_kernel();
 
@@ -50,7 +50,7 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
 			size = QNX4_NAME_MAX;
 
 			if ( ( de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK) ) != 0 ) {
-				QNX4DEBUG(("qnx4_readdir:%.*s\n", size, de->di_fname));
+				QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
 				if ( ( de->di_status & QNX4_FILE_LINK ) == 0 )
 					ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
 				else {
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index d2cd1798d8c4..449f5a66dd34 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -107,7 +107,7 @@ static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_h
 {
 	unsigned long phys;
 
-	QNX4DEBUG(("qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));
+	QNX4DEBUG((KERN_INFO "qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));
 
 	phys = qnx4_block_map( inode, iblock );
 	if ( phys ) {
@@ -142,12 +142,12 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
 			// read next xtnt block.
 			bh = sb_bread(inode->i_sb, i_xblk - 1);
 			if ( !bh ) {
-				QNX4DEBUG(("qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
+				QNX4DEBUG((KERN_ERR "qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
 				return -EIO;
 			}
 			xblk = (struct qnx4_xblk*)bh->b_data;
 			if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) {
-				QNX4DEBUG(("qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->i_xblk));
+				QNX4DEBUG((KERN_ERR "qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->i_xblk));
 				return -EIO;
 			}
 		}
@@ -168,7 +168,7 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
 		brelse( bh );
 	}
 
-	QNX4DEBUG(("qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
+	QNX4DEBUG((KERN_INFO "qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
 	return block;
 }
 
@@ -209,7 +209,7 @@ static const char *qnx4_checkroot(struct super_block *sb)
 	if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') {
 		return "no qnx4 filesystem (no root dir).";
 	} else {
-		QNX4DEBUG(("QNX4 filesystem found on dev %s.\n", sb->s_id));
+		QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
 		rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
 		rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
 		for (j = 0; j < rl; j++) {
@@ -220,7 +220,7 @@ static const char *qnx4_checkroot(struct super_block *sb)
 			for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) {
 				rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE);
 				if (rootdir->di_fname != NULL) {
-					QNX4DEBUG(("Rootdir entry found : [%s]\n", rootdir->di_fname));
+					QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
 					if (!strncmp(rootdir->di_fname, QNX4_BMNAME, sizeof QNX4_BMNAME)) {
 						found = 1;
 						qnx4_sb(sb)->BitMap = kmalloc( sizeof( struct qnx4_inode_entry ), GFP_KERNEL );
@@ -265,12 +265,12 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
 	   if we don't belong here... */
 	bh = sb_bread(s, 1);
 	if (!bh) {
-		printk("qnx4: unable to read the superblock\n");
+		printk(KERN_ERR "qnx4: unable to read the superblock\n");
 		goto outnobh;
 	}
 	if ( le32_to_cpup((__le32*) bh->b_data) != QNX4_SUPER_MAGIC ) {
 		if (!silent)
-			printk("qnx4: wrong fsid in superblock.\n");
+			printk(KERN_ERR "qnx4: wrong fsid in superblock.\n");
 		goto out;
 	}
 	s->s_op = &qnx4_sops;
@@ -284,14 +284,14 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
 	errmsg = qnx4_checkroot(s);
 	if (errmsg != NULL) {
 		if (!silent)
-			printk("qnx4: %s\n", errmsg);
+			printk(KERN_ERR "qnx4: %s\n", errmsg);
 		goto out;
 	}
 
 	/* does root not have inode number QNX4_ROOT_INO ?? */
 	root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
 	if (IS_ERR(root)) {
-		printk("qnx4: get inode failed\n");
+		printk(KERN_ERR "qnx4: get inode failed\n");
 		ret = PTR_ERR(root);
 		goto out;
 	}
@@ -374,7 +374,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
 	qnx4_inode = qnx4_raw_inode(inode);
 	inode->i_mode = 0;
 
-	QNX4DEBUG(("Reading inode : [%d]\n", ino));
+	QNX4DEBUG((KERN_INFO "reading inode : [%d]\n", ino));
 	if (!ino) {
 		printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is "
 				"out of range\n",
@@ -385,7 +385,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
 	block = ino / QNX4_INODES_PER_BLOCK;
 
 	if (!(bh = sb_bread(sb, block))) {
-		printk("qnx4: major problem: unable to read inode from dev "
+		printk(KERN_ERR "qnx4: major problem: unable to read inode from dev "
 			"%s\n", sb->s_id);
 		iget_failed(inode);
 		return ERR_PTR(-EIO);
@@ -499,7 +499,7 @@ static int __init init_qnx4_fs(void)
 		return err;
 	}
 
-	printk("QNX4 filesystem 0.2.3 registered.\n");
+	printk(KERN_INFO "QNX4 filesystem 0.2.3 registered.\n");
 	return 0;
 }
 
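The qnx4 hunks all apply one convention: a KERN_* severity marker is a string literal concatenated onto the front of the format, not a separate argument, so untagged messages stop falling through at the default log level. A minimal sketch of the idiom; the function and prefix are illustrative:

#include <linux/kernel.h>

static void report_example(int err)
{
	if (err)
		/* "KERN_ERR" and the format fuse into one string literal */
		printk(KERN_ERR "example: I/O error %d\n", err);
	else
		printk(KERN_INFO "example: operation completed\n");
}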
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index ae1e7edbacd6..58703ebba879 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -30,7 +30,7 @@ static int qnx4_match(int len, const char *name,
 	int namelen, thislen;
 
 	if (bh == NULL) {
-		printk("qnx4: matching unassigned buffer !\n");
+		printk(KERN_WARNING "qnx4: matching unassigned buffer !\n");
 		return 0;
 	}
 	de = (struct qnx4_inode_entry *) (bh->b_data + *offset);
@@ -66,7 +66,7 @@ static struct buffer_head *qnx4_find_entry(int len, struct inode *dir,
 
 	*res_dir = NULL;
 	if (!dir->i_sb) {
-		printk("qnx4: no superblock on dir.\n");
+		printk(KERN_WARNING "qnx4: no superblock on dir.\n");
 		return NULL;
 	}
 	bh = NULL;
@@ -124,7 +124,7 @@ struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nam
 	foundinode = qnx4_iget(dir->i_sb, ino);
 	if (IS_ERR(foundinode)) {
 		unlock_kernel();
-		QNX4DEBUG(("qnx4: lookup->iget -> error %ld\n",
+		QNX4DEBUG((KERN_ERR "qnx4: lookup->iget -> error %ld\n",
 			   PTR_ERR(foundinode)));
 		return ERR_CAST(foundinode);
 	}
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 8047e01ef46b..353e78a9ebee 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -17,7 +17,7 @@ config QUOTA
 
 config QUOTA_NETLINK_INTERFACE
 	bool "Report quota messages through netlink interface"
-	depends on QUOTA && NET
+	depends on QUOTACTL && NET
 	help
 	  If you say Y here, quota warnings (about exceeding softlimit, reaching
 	  hardlimit, etc.) will be reported through netlink interface. If unsure,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 39b49c42a7ed..eb5a755718f6 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -77,10 +77,6 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h> /* for inode_lock, oddly enough.. */
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-#include <net/netlink.h>
-#include <net/genetlink.h>
-#endif
 
 #include <asm/uaccess.h>
 
@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
 }
 #endif
 
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-
-/* Netlink family structure for quota */
-static struct genl_family quota_genl_family = {
-	.id = GENL_ID_GENERATE,
-	.hdrsize = 0,
-	.name = "VFS_DQUOT",
-	.version = 1,
-	.maxattr = QUOTA_NL_A_MAX,
-};
-
-/* Send warning to userspace about user which exceeded quota */
-static void send_warning(const struct dquot *dquot, const char warntype)
-{
-	static atomic_t seq;
-	struct sk_buff *skb;
-	void *msg_head;
-	int ret;
-	int msg_size = 4 * nla_total_size(sizeof(u32)) +
-		       2 * nla_total_size(sizeof(u64));
-
-	/* We have to allocate using GFP_NOFS as we are called from a
-	 * filesystem performing write and thus further recursion into
-	 * the fs to free some data could cause deadlocks. */
-	skb = genlmsg_new(msg_size, GFP_NOFS);
-	if (!skb) {
-		printk(KERN_ERR
-		       "VFS: Not enough memory to send quota warning.\n");
-		return;
-	}
-	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
-			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
-	if (!msg_head) {
-		printk(KERN_ERR
-		       "VFS: Cannot store netlink header in quota warning.\n");
-		goto err_out;
-	}
-	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
-			  MAJOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
-			  MINOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
-	if (ret)
-		goto attr_err_out;
-	genlmsg_end(skb, msg_head);
-
-	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
-	return;
-attr_err_out:
-	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
-err_out:
-	kfree_skb(skb);
-}
-#endif
 /*
  * Write warnings to the console and send warning messages over netlink.
  *
@@ -1145,18 +1074,20 @@ err_out:
1145 */ 1074 */
1146static void flush_warnings(struct dquot *const *dquots, char *warntype) 1075static void flush_warnings(struct dquot *const *dquots, char *warntype)
1147{ 1076{
1077 struct dquot *dq;
1148 int i; 1078 int i;
1149 1079
1150 for (i = 0; i < MAXQUOTAS; i++) 1080 for (i = 0; i < MAXQUOTAS; i++) {
1151 if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && 1081 dq = dquots[i];
1152 !warning_issued(dquots[i], warntype[i])) { 1082 if (dq && warntype[i] != QUOTA_NL_NOWARN &&
1083 !warning_issued(dq, warntype[i])) {
1153#ifdef CONFIG_PRINT_QUOTA_WARNING 1084#ifdef CONFIG_PRINT_QUOTA_WARNING
1154 print_warning(dquots[i], warntype[i]); 1085 print_warning(dq, warntype[i]);
1155#endif
1156#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1157 send_warning(dquots[i], warntype[i]);
1158#endif 1086#endif
1087 quota_send_warning(dq->dq_type, dq->dq_id,
1088 dq->dq_sb->s_dev, warntype[i]);
1159 } 1089 }
1090 }
1160} 1091}
1161 1092
1162static int ignore_hardlimit(struct dquot *dquot) 1093static int ignore_hardlimit(struct dquot *dquot)
@@ -2473,100 +2404,89 @@ const struct quotactl_ops vfs_quotactl_ops = {
2473 2404
2474static ctl_table fs_dqstats_table[] = { 2405static ctl_table fs_dqstats_table[] = {
2475 { 2406 {
2476 .ctl_name = FS_DQ_LOOKUPS,
2477 .procname = "lookups", 2407 .procname = "lookups",
2478 .data = &dqstats.lookups, 2408 .data = &dqstats.lookups,
2479 .maxlen = sizeof(int), 2409 .maxlen = sizeof(int),
2480 .mode = 0444, 2410 .mode = 0444,
2481 .proc_handler = &proc_dointvec, 2411 .proc_handler = proc_dointvec,
2482 }, 2412 },
2483 { 2413 {
2484 .ctl_name = FS_DQ_DROPS,
2485 .procname = "drops", 2414 .procname = "drops",
2486 .data = &dqstats.drops, 2415 .data = &dqstats.drops,
2487 .maxlen = sizeof(int), 2416 .maxlen = sizeof(int),
2488 .mode = 0444, 2417 .mode = 0444,
2489 .proc_handler = &proc_dointvec, 2418 .proc_handler = proc_dointvec,
2490 }, 2419 },
2491 { 2420 {
2492 .ctl_name = FS_DQ_READS,
2493 .procname = "reads", 2421 .procname = "reads",
2494 .data = &dqstats.reads, 2422 .data = &dqstats.reads,
2495 .maxlen = sizeof(int), 2423 .maxlen = sizeof(int),
2496 .mode = 0444, 2424 .mode = 0444,
2497 .proc_handler = &proc_dointvec, 2425 .proc_handler = proc_dointvec,
2498 }, 2426 },
2499 { 2427 {
2500 .ctl_name = FS_DQ_WRITES,
2501 .procname = "writes", 2428 .procname = "writes",
2502 .data = &dqstats.writes, 2429 .data = &dqstats.writes,
2503 .maxlen = sizeof(int), 2430 .maxlen = sizeof(int),
2504 .mode = 0444, 2431 .mode = 0444,
2505 .proc_handler = &proc_dointvec, 2432 .proc_handler = proc_dointvec,
2506 }, 2433 },
2507 { 2434 {
2508 .ctl_name = FS_DQ_CACHE_HITS,
2509 .procname = "cache_hits", 2435 .procname = "cache_hits",
2510 .data = &dqstats.cache_hits, 2436 .data = &dqstats.cache_hits,
2511 .maxlen = sizeof(int), 2437 .maxlen = sizeof(int),
2512 .mode = 0444, 2438 .mode = 0444,
2513 .proc_handler = &proc_dointvec, 2439 .proc_handler = proc_dointvec,
2514 }, 2440 },
2515 { 2441 {
2516 .ctl_name = FS_DQ_ALLOCATED,
2517 .procname = "allocated_dquots", 2442 .procname = "allocated_dquots",
2518 .data = &dqstats.allocated_dquots, 2443 .data = &dqstats.allocated_dquots,
2519 .maxlen = sizeof(int), 2444 .maxlen = sizeof(int),
2520 .mode = 0444, 2445 .mode = 0444,
2521 .proc_handler = &proc_dointvec, 2446 .proc_handler = proc_dointvec,
2522 }, 2447 },
2523 { 2448 {
2524 .ctl_name = FS_DQ_FREE,
2525 .procname = "free_dquots", 2449 .procname = "free_dquots",
2526 .data = &dqstats.free_dquots, 2450 .data = &dqstats.free_dquots,
2527 .maxlen = sizeof(int), 2451 .maxlen = sizeof(int),
2528 .mode = 0444, 2452 .mode = 0444,
2529 .proc_handler = &proc_dointvec, 2453 .proc_handler = proc_dointvec,
2530 }, 2454 },
2531 { 2455 {
2532 .ctl_name = FS_DQ_SYNCS,
2533 .procname = "syncs", 2456 .procname = "syncs",
2534 .data = &dqstats.syncs, 2457 .data = &dqstats.syncs,
2535 .maxlen = sizeof(int), 2458 .maxlen = sizeof(int),
2536 .mode = 0444, 2459 .mode = 0444,
2537 .proc_handler = &proc_dointvec, 2460 .proc_handler = proc_dointvec,
2538 }, 2461 },
2539#ifdef CONFIG_PRINT_QUOTA_WARNING 2462#ifdef CONFIG_PRINT_QUOTA_WARNING
2540 { 2463 {
2541 .ctl_name = FS_DQ_WARNINGS,
2542 .procname = "warnings", 2464 .procname = "warnings",
2543 .data = &flag_print_warnings, 2465 .data = &flag_print_warnings,
2544 .maxlen = sizeof(int), 2466 .maxlen = sizeof(int),
2545 .mode = 0644, 2467 .mode = 0644,
2546 .proc_handler = &proc_dointvec, 2468 .proc_handler = proc_dointvec,
2547 }, 2469 },
2548#endif 2470#endif
2549 { .ctl_name = 0 }, 2471 { },
2550}; 2472};
2551 2473
2552static ctl_table fs_table[] = { 2474static ctl_table fs_table[] = {
2553 { 2475 {
2554 .ctl_name = FS_DQSTATS,
2555 .procname = "quota", 2476 .procname = "quota",
2556 .mode = 0555, 2477 .mode = 0555,
2557 .child = fs_dqstats_table, 2478 .child = fs_dqstats_table,
2558 }, 2479 },
2559 { .ctl_name = 0 }, 2480 { },
2560}; 2481};
2561 2482
2562static ctl_table sys_table[] = { 2483static ctl_table sys_table[] = {
2563 { 2484 {
2564 .ctl_name = CTL_FS,
2565 .procname = "fs", 2485 .procname = "fs",
2566 .mode = 0555, 2486 .mode = 0555,
2567 .child = fs_table, 2487 .child = fs_table,
2568 }, 2488 },
2569 { .ctl_name = 0 }, 2489 { },
2570}; 2490};
2571 2491
2572static int __init dquot_init(void) 2492static int __init dquot_init(void)
@@ -2607,12 +2527,6 @@ static int __init dquot_init(void)
2607 2527
2608 register_shrinker(&dqcache_shrinker); 2528 register_shrinker(&dqcache_shrinker);
2609 2529
2610#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2611 if (genl_register_family(&quota_genl_family) != 0)
2612 printk(KERN_ERR
2613 "VFS: Failed to create quota netlink interface.\n");
2614#endif
2615
2616 return 0; 2530 return 0;
2617} 2531}
2618module_init(dquot_init); 2532module_init(dquot_init);
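
The sysctl hunks above drop the binary .ctl_name identifiers (removed tree-wide around this time) and pass proc_dointvec by name rather than by address, leaving procfs-only tables terminated by an empty entry. For orientation, a minimal sketch of how a table tree shaped like sys_table is typically hooked up at init time; my_sysctl_header and my_sysctl_init are illustrative names, not part of this patch:

	#include <linux/sysctl.h>

	static struct ctl_table_header *my_sysctl_header;	/* illustrative */

	static int __init my_sysctl_init(void)
	{
		/* register_sysctl_table() walks the .child links
		 * (fs -> quota -> per-stat entries) shown above. */
		my_sysctl_header = register_sysctl_table(sys_table);
		return my_sysctl_header ? 0 : -ENOMEM;
	}
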
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95c5b42384b2..ee91e2756950 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,8 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/quotaops.h> 19#include <linux/quotaops.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <net/netlink.h>
22#include <net/genetlink.h>
21 23
22/* Check validity of generic quotactl commands */ 24/* Check validity of generic quotactl commands */
23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, 25static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
525 return ret; 527 return ret;
526} 528}
527#endif 529#endif
530
531
532#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
533
534/* Netlink family structure for quota */
535static struct genl_family quota_genl_family = {
536 .id = GENL_ID_GENERATE,
537 .hdrsize = 0,
538 .name = "VFS_DQUOT",
539 .version = 1,
540 .maxattr = QUOTA_NL_A_MAX,
541};
542
543/**
544 * quota_send_warning - Send warning to userspace about exceeded quota
 545 * @type: The quota type: USRQUOTA, GRPQUOTA,...
546 * @id: The user or group id of the quota that was exceeded
547 * @dev: The device on which the fs is mounted (sb->s_dev)
548 * @warntype: The type of the warning: QUOTA_NL_...
549 *
550 * This can be used by filesystems (including those which don't use
551 * dquot) to send a message to userspace relating to quota limits.
552 *
553 */
554
555void quota_send_warning(short type, unsigned int id, dev_t dev,
556 const char warntype)
557{
558 static atomic_t seq;
559 struct sk_buff *skb;
560 void *msg_head;
561 int ret;
562 int msg_size = 4 * nla_total_size(sizeof(u32)) +
563 2 * nla_total_size(sizeof(u64));
564
565 /* We have to allocate using GFP_NOFS as we are called from a
566 * filesystem performing write and thus further recursion into
567 * the fs to free some data could cause deadlocks. */
568 skb = genlmsg_new(msg_size, GFP_NOFS);
569 if (!skb) {
570 printk(KERN_ERR
571 "VFS: Not enough memory to send quota warning.\n");
572 return;
573 }
574 msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
575 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
576 if (!msg_head) {
577 printk(KERN_ERR
578 "VFS: Cannot store netlink header in quota warning.\n");
579 goto err_out;
580 }
581 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
582 if (ret)
583 goto attr_err_out;
584 ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
585 if (ret)
586 goto attr_err_out;
587 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
588 if (ret)
589 goto attr_err_out;
590 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
591 if (ret)
592 goto attr_err_out;
593 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
594 if (ret)
595 goto attr_err_out;
596 ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
597 if (ret)
598 goto attr_err_out;
599 genlmsg_end(skb, msg_head);
600
601 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
602 return;
603attr_err_out:
604 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
605err_out:
606 kfree_skb(skb);
607}
608EXPORT_SYMBOL(quota_send_warning);
609
610static int __init quota_init(void)
611{
612 if (genl_register_family(&quota_genl_family) != 0)
613 printk(KERN_ERR
614 "VFS: Failed to create quota netlink interface.\n");
615 return 0;
 616}
617
618module_init(quota_init);
619#endif
620
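
With the netlink sender moved out of dquot.c and exported as quota_send_warning(), any filesystem, dquot-based or not, can emit the same userspace notification; flush_warnings() above is now just one caller. A hedged usage sketch; the function name and the choice of QUOTA_NL_BHARDWARN are illustrative only:

	#include <linux/fs.h>
	#include <linux/quota.h>

	/* Illustrative: report that user "id" hit a block hard limit. */
	static void my_fs_warn_overquota(struct super_block *sb, unsigned int id)
	{
		quota_send_warning(USRQUOTA, id, sb->s_dev, QUOTA_NL_BHARDWARN);
	}
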
diff --git a/fs/read_write.c b/fs/read_write.c
index 3ac28987f22a..b7f4a1f94d48 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -826,8 +826,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
826 if (!(out_file->f_mode & FMODE_WRITE)) 826 if (!(out_file->f_mode & FMODE_WRITE))
827 goto fput_out; 827 goto fput_out;
828 retval = -EINVAL; 828 retval = -EINVAL;
829 if (!out_file->f_op || !out_file->f_op->sendpage)
830 goto fput_out;
831 in_inode = in_file->f_path.dentry->d_inode; 829 in_inode = in_file->f_path.dentry->d_inode;
832 out_inode = out_file->f_path.dentry->d_inode; 830 out_inode = out_file->f_path.dentry->d_inode;
833 retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count); 831 retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 7c5ab6330dd6..6a9e30c041dd 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs.o
7reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \ 7reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
8 super.o prints.o objectid.o lbalance.o ibalance.o stree.o \ 8 super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
9 hashes.o tail_conversion.o journal.o resize.o \ 9 hashes.o tail_conversion.o journal.o resize.o \
10 item_ops.o ioctl.o procfs.o xattr.o 10 item_ops.o ioctl.o procfs.o xattr.o lock.o
11 11
12ifeq ($(CONFIG_REISERFS_FS_XATTR),y) 12ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
13reiserfs-objs += xattr_user.o xattr_trusted.o 13reiserfs-objs += xattr_user.o xattr_trusted.o
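
lock.o is new to this series; the reiserfs_write_lock()/reiserfs_write_unlock() calls threaded through the hunks below presumably live there, replacing the big kernel lock with a per-superblock recursive mutex. A sketch of the assumed core; the lock, lock_owner and lock_depth fields of struct reiserfs_sb_info are assumptions here, since fs/reiserfs/lock.c itself is not part of this section:

	/* Sketch only: per-superblock lock, recursive for the owning task. */
	void reiserfs_write_lock(struct super_block *s)
	{
		struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

		if (sb_i->lock_owner != current) {
			mutex_lock(&sb_i->lock);
			sb_i->lock_owner = current;
		}
		sb_i->lock_depth++;
	}

	void reiserfs_write_unlock(struct super_block *s)
	{
		struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

		if (--sb_i->lock_depth == 0) {
			sb_i->lock_owner = NULL;
			mutex_unlock(&sb_i->lock);
		}
	}
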
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index e716161ab325..685495707181 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1249,14 +1249,18 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
1249 else if (bitmap == 0) 1249 else if (bitmap == 0)
1250 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1; 1250 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
1251 1251
1252 reiserfs_write_unlock(sb);
1252 bh = sb_bread(sb, block); 1253 bh = sb_bread(sb, block);
1254 reiserfs_write_lock(sb);
1253 if (bh == NULL) 1255 if (bh == NULL)
1254 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) " 1256 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
1255 "reading failed", __func__, block); 1257 "reading failed", __func__, block);
1256 else { 1258 else {
1257 if (buffer_locked(bh)) { 1259 if (buffer_locked(bh)) {
1258 PROC_INFO_INC(sb, scan_bitmap.wait); 1260 PROC_INFO_INC(sb, scan_bitmap.wait);
1261 reiserfs_write_unlock(sb);
1259 __wait_on_buffer(bh); 1262 __wait_on_buffer(bh);
1263 reiserfs_write_lock(sb);
1260 } 1264 }
1261 BUG_ON(!buffer_uptodate(bh)); 1265 BUG_ON(!buffer_uptodate(bh));
1262 BUG_ON(atomic_read(&bh->b_count) == 0); 1266 BUG_ON(atomic_read(&bh->b_count) == 0);
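
sb_bread() and __wait_on_buffer() can sleep on disk I/O, and a mutex, unlike the BKL, is not dropped automatically on context switch, so the lock must be released explicitly around each blocking call. This bracketing recurs throughout the series; a hypothetical helper capturing the pattern (the patch open-codes it at each site):

	/* Hypothetical: read a block with the reiserfs write lock relaxed. */
	static struct buffer_head *my_bread_unlocked(struct super_block *sb,
						     sector_t block)
	{
		struct buffer_head *bh;

		reiserfs_write_unlock(sb);	/* let other writers in during I/O */
		bh = sb_bread(sb, block);
		reiserfs_write_lock(sb);	/* re-take before touching fs state */
		return bh;
	}
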
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 6d2668fdc384..c094f58c7448 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -20,7 +20,7 @@ const struct file_operations reiserfs_dir_operations = {
20 .read = generic_read_dir, 20 .read = generic_read_dir,
21 .readdir = reiserfs_readdir, 21 .readdir = reiserfs_readdir,
22 .fsync = reiserfs_dir_fsync, 22 .fsync = reiserfs_dir_fsync,
23 .ioctl = reiserfs_ioctl, 23 .unlocked_ioctl = reiserfs_ioctl,
24#ifdef CONFIG_COMPAT 24#ifdef CONFIG_COMPAT
25 .compat_ioctl = reiserfs_compat_ioctl, 25 .compat_ioctl = reiserfs_compat_ioctl,
26#endif 26#endif
@@ -174,14 +174,22 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
174 // user space buffer is swapped out. At that time 174 // user space buffer is swapped out. At that time
175 // entry can move to somewhere else 175 // entry can move to somewhere else
176 memcpy(local_buf, d_name, d_reclen); 176 memcpy(local_buf, d_name, d_reclen);
177
178 /*
179 * Since filldir might sleep, we can release
180 * the write lock here for other waiters
181 */
182 reiserfs_write_unlock(inode->i_sb);
177 if (filldir 183 if (filldir
178 (dirent, local_buf, d_reclen, d_off, d_ino, 184 (dirent, local_buf, d_reclen, d_off, d_ino,
179 DT_UNKNOWN) < 0) { 185 DT_UNKNOWN) < 0) {
186 reiserfs_write_lock(inode->i_sb);
180 if (local_buf != small_buf) { 187 if (local_buf != small_buf) {
181 kfree(local_buf); 188 kfree(local_buf);
182 } 189 }
183 goto end; 190 goto end;
184 } 191 }
192 reiserfs_write_lock(inode->i_sb);
185 if (local_buf != small_buf) { 193 if (local_buf != small_buf) {
186 kfree(local_buf); 194 kfree(local_buf);
187 } 195 }
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 128d3f7c8aa5..60c080440661 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -21,14 +21,6 @@
21#include <linux/buffer_head.h> 21#include <linux/buffer_head.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23 23
24#ifdef CONFIG_REISERFS_CHECK
25
26struct tree_balance *cur_tb = NULL; /* detects whether more than one
27 copy of tb exists as a means
28 of checking whether schedule
29 is interrupting do_balance */
30#endif
31
32static inline void buffer_info_init_left(struct tree_balance *tb, 24static inline void buffer_info_init_left(struct tree_balance *tb,
33 struct buffer_info *bi) 25 struct buffer_info *bi)
34{ 26{
@@ -1840,11 +1832,12 @@ static int check_before_balancing(struct tree_balance *tb)
1840{ 1832{
1841 int retval = 0; 1833 int retval = 0;
1842 1834
1843 if (cur_tb) { 1835 if (REISERFS_SB(tb->tb_sb)->cur_tb) {
1844 reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule " 1836 reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
1845 "occurred based on cur_tb not being null at " 1837 "occurred based on cur_tb not being null at "
1846 "this point in code. do_balance cannot properly " 1838 "this point in code. do_balance cannot properly "
1847 "handle schedule occurring while it runs."); 1839 "handle concurrent tree accesses on a same "
1840 "mount point.");
1848 } 1841 }
1849 1842
1850 /* double check that buffers that we will modify are unlocked. (fix_nodes should already have 1843 /* double check that buffers that we will modify are unlocked. (fix_nodes should already have
@@ -1986,7 +1979,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
1986 "check");*/ 1979 "check");*/
1987 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB"); 1980 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
1988#ifdef CONFIG_REISERFS_CHECK 1981#ifdef CONFIG_REISERFS_CHECK
1989 cur_tb = tb; 1982 REISERFS_SB(tb->tb_sb)->cur_tb = tb;
1990#endif 1983#endif
1991} 1984}
1992 1985
@@ -1996,7 +1989,7 @@ static inline void do_balance_completed(struct tree_balance *tb)
1996#ifdef CONFIG_REISERFS_CHECK 1989#ifdef CONFIG_REISERFS_CHECK
1997 check_leaf_level(tb); 1990 check_leaf_level(tb);
1998 check_internal_levels(tb); 1991 check_internal_levels(tb);
1999 cur_tb = NULL; 1992 REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
2000#endif 1993#endif
2001 1994
2002 /* reiserfs_free_block is no longer schedule safe. So, we need to 1995 /* reiserfs_free_block is no longer schedule safe. So, we need to
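
Moving cur_tb from a global into the per-superblock info means the CONFIG_REISERFS_CHECK assertion now detects concurrent balancing on one mount instead of tripping across unrelated mounts. The field itself is presumably added to struct reiserfs_sb_info in a hunk outside this section; a sketch of the check as used above:

	#ifdef CONFIG_REISERFS_CHECK
	/* Sketch, assuming cur_tb now lives in struct reiserfs_sb_info. */
	static inline int my_balance_in_progress(struct super_block *sb)
	{
		return REISERFS_SB(sb)->cur_tb != NULL;
	}
	#endif
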
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9f436668b7f8..da2dba082e2d 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -284,7 +284,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
284const struct file_operations reiserfs_file_operations = { 284const struct file_operations reiserfs_file_operations = {
285 .read = do_sync_read, 285 .read = do_sync_read,
286 .write = reiserfs_file_write, 286 .write = reiserfs_file_write,
287 .ioctl = reiserfs_ioctl, 287 .unlocked_ioctl = reiserfs_ioctl,
288#ifdef CONFIG_COMPAT 288#ifdef CONFIG_COMPAT
289 .compat_ioctl = reiserfs_compat_ioctl, 289 .compat_ioctl = reiserfs_compat_ioctl,
290#endif 290#endif
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 5e5a4e6fbaf8..6591cb21edf6 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -563,9 +563,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
563 return needed_nodes; 563 return needed_nodes;
564} 564}
565 565
566#ifdef CONFIG_REISERFS_CHECK
567extern struct tree_balance *cur_tb;
568#endif
569 566
570/* Set parameters for balancing. 567/* Set parameters for balancing.
571 * Performs write of results of analysis of balancing into structure tb, 568 * Performs write of results of analysis of balancing into structure tb,
@@ -834,7 +831,7 @@ static int get_empty_nodes(struct tree_balance *tb, int h)
834 RFALSE(buffer_dirty(new_bh) || 831 RFALSE(buffer_dirty(new_bh) ||
835 buffer_journaled(new_bh) || 832 buffer_journaled(new_bh) ||
836 buffer_journal_dirty(new_bh), 833 buffer_journal_dirty(new_bh),
837 "PAP-8140: journlaled or dirty buffer %b for the new block", 834 "PAP-8140: journaled or dirty buffer %b for the new block",
838 new_bh); 835 new_bh);
839 836
840 /* Put empty buffers into the array. */ 837 /* Put empty buffers into the array. */
@@ -1022,7 +1019,11 @@ static int get_far_parent(struct tree_balance *tb,
1022 /* Check whether the common parent is locked. */ 1019 /* Check whether the common parent is locked. */
1023 1020
1024 if (buffer_locked(*pcom_father)) { 1021 if (buffer_locked(*pcom_father)) {
1022
1023 /* Release the write lock while the buffer is busy */
1024 reiserfs_write_unlock(tb->tb_sb);
1025 __wait_on_buffer(*pcom_father); 1025 __wait_on_buffer(*pcom_father);
1026 reiserfs_write_lock(tb->tb_sb);
1026 if (FILESYSTEM_CHANGED_TB(tb)) { 1027 if (FILESYSTEM_CHANGED_TB(tb)) {
1027 brelse(*pcom_father); 1028 brelse(*pcom_father);
1028 return REPEAT_SEARCH; 1029 return REPEAT_SEARCH;
@@ -1927,7 +1928,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
1927 return REPEAT_SEARCH; 1928 return REPEAT_SEARCH;
1928 1929
1929 if (buffer_locked(bh)) { 1930 if (buffer_locked(bh)) {
1931 reiserfs_write_unlock(tb->tb_sb);
1930 __wait_on_buffer(bh); 1932 __wait_on_buffer(bh);
1933 reiserfs_write_lock(tb->tb_sb);
1931 if (FILESYSTEM_CHANGED_TB(tb)) 1934 if (FILESYSTEM_CHANGED_TB(tb))
1932 return REPEAT_SEARCH; 1935 return REPEAT_SEARCH;
1933 } 1936 }
@@ -1965,7 +1968,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
1965 tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb-> 1968 tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
1966 FL[h]); 1969 FL[h]);
1967 son_number = B_N_CHILD_NUM(tb->FL[h], child_position); 1970 son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
1971 reiserfs_write_unlock(sb);
1968 bh = sb_bread(sb, son_number); 1972 bh = sb_bread(sb, son_number);
1973 reiserfs_write_lock(sb);
1969 if (!bh) 1974 if (!bh)
1970 return IO_ERROR; 1975 return IO_ERROR;
1971 if (FILESYSTEM_CHANGED_TB(tb)) { 1976 if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2003,7 +2008,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
2003 child_position = 2008 child_position =
2004 (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0; 2009 (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
2005 son_number = B_N_CHILD_NUM(tb->FR[h], child_position); 2010 son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
2011 reiserfs_write_unlock(sb);
2006 bh = sb_bread(sb, son_number); 2012 bh = sb_bread(sb, son_number);
2013 reiserfs_write_lock(sb);
2007 if (!bh) 2014 if (!bh)
2008 return IO_ERROR; 2015 return IO_ERROR;
2009 if (FILESYSTEM_CHANGED_TB(tb)) { 2016 if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2278,7 +2285,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
2278 REPEAT_SEARCH : CARRY_ON; 2285 REPEAT_SEARCH : CARRY_ON;
2279 } 2286 }
2280#endif 2287#endif
2288 reiserfs_write_unlock(tb->tb_sb);
2281 __wait_on_buffer(locked); 2289 __wait_on_buffer(locked);
2290 reiserfs_write_lock(tb->tb_sb);
2282 if (FILESYSTEM_CHANGED_TB(tb)) 2291 if (FILESYSTEM_CHANGED_TB(tb))
2283 return REPEAT_SEARCH; 2292 return REPEAT_SEARCH;
2284 } 2293 }
@@ -2349,12 +2358,14 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
2349 2358
 2350 /* if it is possible in indirect_to_direct conversion */ 2359
2351 if (buffer_locked(tbS0)) { 2360 if (buffer_locked(tbS0)) {
2361 reiserfs_write_unlock(tb->tb_sb);
2352 __wait_on_buffer(tbS0); 2362 __wait_on_buffer(tbS0);
2363 reiserfs_write_lock(tb->tb_sb);
2353 if (FILESYSTEM_CHANGED_TB(tb)) 2364 if (FILESYSTEM_CHANGED_TB(tb))
2354 return REPEAT_SEARCH; 2365 return REPEAT_SEARCH;
2355 } 2366 }
2356#ifdef CONFIG_REISERFS_CHECK 2367#ifdef CONFIG_REISERFS_CHECK
2357 if (cur_tb) { 2368 if (REISERFS_SB(tb->tb_sb)->cur_tb) {
2358 print_cur_tb("fix_nodes"); 2369 print_cur_tb("fix_nodes");
2359 reiserfs_panic(tb->tb_sb, "PAP-8305", 2370 reiserfs_panic(tb->tb_sb, "PAP-8305",
2360 "there is pending do_balance"); 2371 "there is pending do_balance");
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a14d6cd9eeda..3a28e7751b3c 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -251,7 +251,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
251 struct cpu_key key; 251 struct cpu_key key;
252 struct buffer_head *bh; 252 struct buffer_head *bh;
253 struct item_head *ih, tmp_ih; 253 struct item_head *ih, tmp_ih;
254 int fs_gen;
255 b_blocknr_t blocknr; 254 b_blocknr_t blocknr;
256 char *p = NULL; 255 char *p = NULL;
257 int chars; 256 int chars;
@@ -265,7 +264,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
265 (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY, 264 (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
266 3); 265 3);
267 266
268 research:
269 result = search_for_position_by_key(inode->i_sb, &key, &path); 267 result = search_for_position_by_key(inode->i_sb, &key, &path);
270 if (result != POSITION_FOUND) { 268 if (result != POSITION_FOUND) {
271 pathrelse(&path); 269 pathrelse(&path);
@@ -340,7 +338,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
340 } 338 }
341 // read file tail into part of page 339 // read file tail into part of page
342 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1); 340 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
343 fs_gen = get_generation(inode->i_sb);
344 copy_item_head(&tmp_ih, ih); 341 copy_item_head(&tmp_ih, ih);
345 342
346 /* we only want to kmap if we are reading the tail into the page. 343 /* we only want to kmap if we are reading the tail into the page.
@@ -348,13 +345,9 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
348 ** sure we need to. But, this means the item might move if 345 ** sure we need to. But, this means the item might move if
349 ** kmap schedules 346 ** kmap schedules
350 */ 347 */
351 if (!p) { 348 if (!p)
352 p = (char *)kmap(bh_result->b_page); 349 p = (char *)kmap(bh_result->b_page);
353 if (fs_changed(fs_gen, inode->i_sb) 350
354 && item_moved(&tmp_ih, &path)) {
355 goto research;
356 }
357 }
358 p += offset; 351 p += offset;
359 memset(p, 0, inode->i_sb->s_blocksize); 352 memset(p, 0, inode->i_sb->s_blocksize);
360 do { 353 do {
@@ -489,10 +482,14 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
489 disappeared */ 482 disappeared */
490 if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) { 483 if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
491 int err; 484 int err;
492 lock_kernel(); 485
486 reiserfs_write_lock(inode->i_sb);
487
493 err = reiserfs_commit_for_inode(inode); 488 err = reiserfs_commit_for_inode(inode);
494 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask; 489 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
495 unlock_kernel(); 490
491 reiserfs_write_unlock(inode->i_sb);
492
496 if (err < 0) 493 if (err < 0)
497 ret = err; 494 ret = err;
498 } 495 }
@@ -601,6 +598,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
601 __le32 *item; 598 __le32 *item;
602 int done; 599 int done;
603 int fs_gen; 600 int fs_gen;
601 int lock_depth;
604 struct reiserfs_transaction_handle *th = NULL; 602 struct reiserfs_transaction_handle *th = NULL;
605 /* space reserved in transaction batch: 603 /* space reserved in transaction batch:
606 . 3 balancings in direct->indirect conversion 604 . 3 balancings in direct->indirect conversion
@@ -616,12 +614,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
616 loff_t new_offset = 614 loff_t new_offset =
617 (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1; 615 (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
618 616
619 /* bad.... */ 617 lock_depth = reiserfs_write_lock_once(inode->i_sb);
620 reiserfs_write_lock(inode->i_sb);
621 version = get_inode_item_key_version(inode); 618 version = get_inode_item_key_version(inode);
622 619
623 if (!file_capable(inode, block)) { 620 if (!file_capable(inode, block)) {
624 reiserfs_write_unlock(inode->i_sb); 621 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
625 return -EFBIG; 622 return -EFBIG;
626 } 623 }
627 624
@@ -633,7 +630,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
633 /* find number of block-th logical block of the file */ 630 /* find number of block-th logical block of the file */
634 ret = _get_block_create_0(inode, block, bh_result, 631 ret = _get_block_create_0(inode, block, bh_result,
635 create | GET_BLOCK_READ_DIRECT); 632 create | GET_BLOCK_READ_DIRECT);
636 reiserfs_write_unlock(inode->i_sb); 633 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
637 return ret; 634 return ret;
638 } 635 }
639 /* 636 /*
@@ -751,7 +748,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
751 if (!dangle && th) 748 if (!dangle && th)
752 retval = reiserfs_end_persistent_transaction(th); 749 retval = reiserfs_end_persistent_transaction(th);
753 750
754 reiserfs_write_unlock(inode->i_sb); 751 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
755 752
756 /* the item was found, so new blocks were not added to the file 753 /* the item was found, so new blocks were not added to the file
757 ** there is no need to make sure the inode is updated with this 754 ** there is no need to make sure the inode is updated with this
@@ -935,7 +932,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
935 if (blocks_needed == 1) { 932 if (blocks_needed == 1) {
936 un = &unf_single; 933 un = &unf_single;
937 } else { 934 } else {
938 un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling. 935 un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_NOFS);
939 if (!un) { 936 if (!un) {
940 un = &unf_single; 937 un = &unf_single;
941 blocks_needed = 1; 938 blocks_needed = 1;
@@ -997,10 +994,16 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
997 if (retval) 994 if (retval)
998 goto failure; 995 goto failure;
999 } 996 }
1000 /* inserting indirect pointers for a hole can take a 997 /*
1001 ** long time. reschedule if needed 998 * inserting indirect pointers for a hole can take a
999 * long time. reschedule if needed and also release the write
1000 * lock for others.
1002 */ 1001 */
1003 cond_resched(); 1002 if (need_resched()) {
1003 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
1004 schedule();
1005 lock_depth = reiserfs_write_lock_once(inode->i_sb);
1006 }
1004 1007
1005 retval = search_for_position_by_key(inode->i_sb, &key, &path); 1008 retval = search_for_position_by_key(inode->i_sb, &key, &path);
1006 if (retval == IO_ERROR) { 1009 if (retval == IO_ERROR) {
@@ -1035,7 +1038,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
1035 retval = err; 1038 retval = err;
1036 } 1039 }
1037 1040
1038 reiserfs_write_unlock(inode->i_sb); 1041 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
1039 reiserfs_check_path(&path); 1042 reiserfs_check_path(&path);
1040 return retval; 1043 return retval;
1041} 1044}
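
reiserfs_get_block() can be entered both with and without the write lock already held, so the plain lock/unlock pair becomes reiserfs_write_lock_once()/reiserfs_write_unlock_once(): take the lock only if the current task does not yet own it, and return a depth token so the matching unlock restores the caller's nesting. A minimal sketch of those semantics, assuming the lock tracks its owner; the my_* names are illustrative and the real helpers are defined outside this diff:

	static inline int my_write_lock_once(struct super_block *sb)
	{
		if (REISERFS_SB(sb)->lock_owner != current) {	/* assumed field */
			reiserfs_write_lock(sb);
			return 1;	/* we took it; caller must drop it */
		}
		return 0;		/* already ours; leave nesting alone */
	}

	static inline void my_write_unlock_once(struct super_block *sb, int depth)
	{
		if (depth)
			reiserfs_write_unlock(sb);
	}
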
@@ -2072,8 +2075,9 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2072 int error; 2075 int error;
2073 struct buffer_head *bh = NULL; 2076 struct buffer_head *bh = NULL;
2074 int err2; 2077 int err2;
2078 int lock_depth;
2075 2079
2076 reiserfs_write_lock(inode->i_sb); 2080 lock_depth = reiserfs_write_lock_once(inode->i_sb);
2077 2081
2078 if (inode->i_size > 0) { 2082 if (inode->i_size > 0) {
2079 error = grab_tail_page(inode, &page, &bh); 2083 error = grab_tail_page(inode, &page, &bh);
@@ -2142,14 +2146,17 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2142 page_cache_release(page); 2146 page_cache_release(page);
2143 } 2147 }
2144 2148
2145 reiserfs_write_unlock(inode->i_sb); 2149 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2150
2146 return 0; 2151 return 0;
2147 out: 2152 out:
2148 if (page) { 2153 if (page) {
2149 unlock_page(page); 2154 unlock_page(page);
2150 page_cache_release(page); 2155 page_cache_release(page);
2151 } 2156 }
2152 reiserfs_write_unlock(inode->i_sb); 2157
2158 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2159
2153 return error; 2160 return error;
2154} 2161}
2155 2162
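
The hole-filling loop in reiserfs_get_block() above also replaces a bare cond_resched() with an explicit unlock around schedule(), again because the mutex would otherwise stay held while the task sleeps. The same logic as a hypothetical helper:

	/* Hypothetical equivalent of the open-coded need_resched() dance. */
	static inline void my_cond_resched_with_lock(struct super_block *sb,
						     int *lock_depth)
	{
		if (need_resched()) {
			reiserfs_write_unlock_once(sb, *lock_depth);
			schedule();
			*lock_depth = reiserfs_write_lock_once(sb);
		}
	}
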
@@ -2608,7 +2615,10 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
2608 int ret; 2615 int ret;
2609 int old_ref = 0; 2616 int old_ref = 0;
2610 2617
2618 reiserfs_write_unlock(inode->i_sb);
2611 reiserfs_wait_on_write_block(inode->i_sb); 2619 reiserfs_wait_on_write_block(inode->i_sb);
2620 reiserfs_write_lock(inode->i_sb);
2621
2612 fix_tail_page_for_writing(page); 2622 fix_tail_page_for_writing(page);
2613 if (reiserfs_transaction_running(inode->i_sb)) { 2623 if (reiserfs_transaction_running(inode->i_sb)) {
2614 struct reiserfs_transaction_handle *th; 2624 struct reiserfs_transaction_handle *th;
@@ -2664,6 +2674,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2664 int update_sd = 0; 2674 int update_sd = 0;
2665 struct reiserfs_transaction_handle *th; 2675 struct reiserfs_transaction_handle *th;
2666 unsigned start; 2676 unsigned start;
2677 int lock_depth = 0;
2678 bool locked = false;
2667 2679
2668 if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND) 2680 if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
2669 pos ++; 2681 pos ++;
@@ -2690,9 +2702,11 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2690 ** to do the i_size updates here. 2702 ** to do the i_size updates here.
2691 */ 2703 */
2692 pos += copied; 2704 pos += copied;
2705
2693 if (pos > inode->i_size) { 2706 if (pos > inode->i_size) {
2694 struct reiserfs_transaction_handle myth; 2707 struct reiserfs_transaction_handle myth;
2695 reiserfs_write_lock(inode->i_sb); 2708 lock_depth = reiserfs_write_lock_once(inode->i_sb);
2709 locked = true;
 2696 /* If the file has grown beyond the border where it 2710 /* If the file has grown beyond the border where it
2697 can have a tail, unmark it as needing a tail 2711 can have a tail, unmark it as needing a tail
2698 packing */ 2712 packing */
@@ -2703,10 +2717,9 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2703 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask; 2717 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2704 2718
2705 ret = journal_begin(&myth, inode->i_sb, 1); 2719 ret = journal_begin(&myth, inode->i_sb, 1);
2706 if (ret) { 2720 if (ret)
2707 reiserfs_write_unlock(inode->i_sb);
2708 goto journal_error; 2721 goto journal_error;
2709 } 2722
2710 reiserfs_update_inode_transaction(inode); 2723 reiserfs_update_inode_transaction(inode);
2711 inode->i_size = pos; 2724 inode->i_size = pos;
2712 /* 2725 /*
@@ -2718,34 +2731,36 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2718 reiserfs_update_sd(&myth, inode); 2731 reiserfs_update_sd(&myth, inode);
2719 update_sd = 1; 2732 update_sd = 1;
2720 ret = journal_end(&myth, inode->i_sb, 1); 2733 ret = journal_end(&myth, inode->i_sb, 1);
2721 reiserfs_write_unlock(inode->i_sb);
2722 if (ret) 2734 if (ret)
2723 goto journal_error; 2735 goto journal_error;
2724 } 2736 }
2725 if (th) { 2737 if (th) {
2726 reiserfs_write_lock(inode->i_sb); 2738 if (!locked) {
2739 lock_depth = reiserfs_write_lock_once(inode->i_sb);
2740 locked = true;
2741 }
2727 if (!update_sd) 2742 if (!update_sd)
2728 mark_inode_dirty(inode); 2743 mark_inode_dirty(inode);
2729 ret = reiserfs_end_persistent_transaction(th); 2744 ret = reiserfs_end_persistent_transaction(th);
2730 reiserfs_write_unlock(inode->i_sb);
2731 if (ret) 2745 if (ret)
2732 goto out; 2746 goto out;
2733 } 2747 }
2734 2748
2735 out: 2749 out:
2750 if (locked)
2751 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2736 unlock_page(page); 2752 unlock_page(page);
2737 page_cache_release(page); 2753 page_cache_release(page);
2738 return ret == 0 ? copied : ret; 2754 return ret == 0 ? copied : ret;
2739 2755
2740 journal_error: 2756 journal_error:
2757 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2758 locked = false;
2741 if (th) { 2759 if (th) {
2742 reiserfs_write_lock(inode->i_sb);
2743 if (!update_sd) 2760 if (!update_sd)
2744 reiserfs_update_sd(th, inode); 2761 reiserfs_update_sd(th, inode);
2745 ret = reiserfs_end_persistent_transaction(th); 2762 ret = reiserfs_end_persistent_transaction(th);
2746 reiserfs_write_unlock(inode->i_sb);
2747 } 2763 }
2748
2749 goto out; 2764 goto out;
2750} 2765}
2751 2766
@@ -2758,7 +2773,10 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2758 int update_sd = 0; 2773 int update_sd = 0;
2759 struct reiserfs_transaction_handle *th = NULL; 2774 struct reiserfs_transaction_handle *th = NULL;
2760 2775
2776 reiserfs_write_unlock(inode->i_sb);
2761 reiserfs_wait_on_write_block(inode->i_sb); 2777 reiserfs_wait_on_write_block(inode->i_sb);
2778 reiserfs_write_lock(inode->i_sb);
2779
2762 if (reiserfs_transaction_running(inode->i_sb)) { 2780 if (reiserfs_transaction_running(inode->i_sb)) {
2763 th = current->journal_info; 2781 th = current->journal_info;
2764 } 2782 }
@@ -2770,7 +2788,6 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2770 */ 2788 */
2771 if (pos > inode->i_size) { 2789 if (pos > inode->i_size) {
2772 struct reiserfs_transaction_handle myth; 2790 struct reiserfs_transaction_handle myth;
2773 reiserfs_write_lock(inode->i_sb);
 2774 /* If the file has grown beyond the border where it 2791 /* If the file has grown beyond the border where it
2775 can have a tail, unmark it as needing a tail 2792 can have a tail, unmark it as needing a tail
2776 packing */ 2793 packing */
@@ -2781,10 +2798,9 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2781 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask; 2798 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2782 2799
2783 ret = journal_begin(&myth, inode->i_sb, 1); 2800 ret = journal_begin(&myth, inode->i_sb, 1);
2784 if (ret) { 2801 if (ret)
2785 reiserfs_write_unlock(inode->i_sb);
2786 goto journal_error; 2802 goto journal_error;
2787 } 2803
2788 reiserfs_update_inode_transaction(inode); 2804 reiserfs_update_inode_transaction(inode);
2789 inode->i_size = pos; 2805 inode->i_size = pos;
2790 /* 2806 /*
@@ -2796,16 +2812,13 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2796 reiserfs_update_sd(&myth, inode); 2812 reiserfs_update_sd(&myth, inode);
2797 update_sd = 1; 2813 update_sd = 1;
2798 ret = journal_end(&myth, inode->i_sb, 1); 2814 ret = journal_end(&myth, inode->i_sb, 1);
2799 reiserfs_write_unlock(inode->i_sb);
2800 if (ret) 2815 if (ret)
2801 goto journal_error; 2816 goto journal_error;
2802 } 2817 }
2803 if (th) { 2818 if (th) {
2804 reiserfs_write_lock(inode->i_sb);
2805 if (!update_sd) 2819 if (!update_sd)
2806 mark_inode_dirty(inode); 2820 mark_inode_dirty(inode);
2807 ret = reiserfs_end_persistent_transaction(th); 2821 ret = reiserfs_end_persistent_transaction(th);
2808 reiserfs_write_unlock(inode->i_sb);
2809 if (ret) 2822 if (ret)
2810 goto out; 2823 goto out;
2811 } 2824 }
@@ -2815,11 +2828,9 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2815 2828
2816 journal_error: 2829 journal_error:
2817 if (th) { 2830 if (th) {
2818 reiserfs_write_lock(inode->i_sb);
2819 if (!update_sd) 2831 if (!update_sd)
2820 reiserfs_update_sd(th, inode); 2832 reiserfs_update_sd(th, inode);
2821 ret = reiserfs_end_persistent_transaction(th); 2833 ret = reiserfs_end_persistent_transaction(th);
2822 reiserfs_write_unlock(inode->i_sb);
2823 } 2834 }
2824 2835
2825 return ret; 2836 return ret;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 0ccc3fdda7bf..ace77451ceb1 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -13,44 +13,52 @@
13#include <linux/compat.h> 13#include <linux/compat.h>
14 14
15/* 15/*
16** reiserfs_ioctl - handler for ioctl for inode 16 * reiserfs_ioctl - handler for ioctl for inode
17** supported commands: 17 * supported commands:
18** 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect 18 * 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
19** and prevent packing file (argument arg has to be non-zero) 19 * and prevent packing file (argument arg has to be non-zero)
20** 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION 20 * 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
21** 3) That's all for a while ... 21 * 3) That's all for a while ...
22*/ 22 */
23int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 23long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
24 unsigned long arg)
25{ 24{
25 struct inode *inode = filp->f_path.dentry->d_inode;
26 unsigned int flags; 26 unsigned int flags;
27 int err = 0; 27 int err = 0;
28 28
29 reiserfs_write_lock(inode->i_sb);
30
29 switch (cmd) { 31 switch (cmd) {
30 case REISERFS_IOC_UNPACK: 32 case REISERFS_IOC_UNPACK:
31 if (S_ISREG(inode->i_mode)) { 33 if (S_ISREG(inode->i_mode)) {
32 if (arg) 34 if (arg)
33 return reiserfs_unpack(inode, filp); 35 err = reiserfs_unpack(inode, filp);
34 else
35 return 0;
36 } else 36 } else
37 return -ENOTTY; 37 err = -ENOTTY;
38 /* following two cases are taken from fs/ext2/ioctl.c by Remy 38 break;
39 Card (card@masi.ibp.fr) */ 39 /*
40 * following two cases are taken from fs/ext2/ioctl.c by Remy
41 * Card (card@masi.ibp.fr)
42 */
40 case REISERFS_IOC_GETFLAGS: 43 case REISERFS_IOC_GETFLAGS:
41 if (!reiserfs_attrs(inode->i_sb)) 44 if (!reiserfs_attrs(inode->i_sb)) {
42 return -ENOTTY; 45 err = -ENOTTY;
46 break;
47 }
43 48
44 flags = REISERFS_I(inode)->i_attrs; 49 flags = REISERFS_I(inode)->i_attrs;
45 i_attrs_to_sd_attrs(inode, (__u16 *) & flags); 50 i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
46 return put_user(flags, (int __user *)arg); 51 err = put_user(flags, (int __user *)arg);
52 break;
47 case REISERFS_IOC_SETFLAGS:{ 53 case REISERFS_IOC_SETFLAGS:{
48 if (!reiserfs_attrs(inode->i_sb)) 54 if (!reiserfs_attrs(inode->i_sb)) {
49 return -ENOTTY; 55 err = -ENOTTY;
56 break;
57 }
50 58
51 err = mnt_want_write(filp->f_path.mnt); 59 err = mnt_want_write(filp->f_path.mnt);
52 if (err) 60 if (err)
53 return err; 61 break;
54 62
55 if (!is_owner_or_cap(inode)) { 63 if (!is_owner_or_cap(inode)) {
56 err = -EPERM; 64 err = -EPERM;
@@ -90,16 +98,18 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
90 mark_inode_dirty(inode); 98 mark_inode_dirty(inode);
91setflags_out: 99setflags_out:
92 mnt_drop_write(filp->f_path.mnt); 100 mnt_drop_write(filp->f_path.mnt);
93 return err; 101 break;
94 } 102 }
95 case REISERFS_IOC_GETVERSION: 103 case REISERFS_IOC_GETVERSION:
96 return put_user(inode->i_generation, (int __user *)arg); 104 err = put_user(inode->i_generation, (int __user *)arg);
105 break;
97 case REISERFS_IOC_SETVERSION: 106 case REISERFS_IOC_SETVERSION:
98 if (!is_owner_or_cap(inode)) 107 if (!is_owner_or_cap(inode))
99 return -EPERM; 108 err = -EPERM;
109 break;
100 err = mnt_want_write(filp->f_path.mnt); 110 err = mnt_want_write(filp->f_path.mnt);
101 if (err) 111 if (err)
102 return err; 112 break;
103 if (get_user(inode->i_generation, (int __user *)arg)) { 113 if (get_user(inode->i_generation, (int __user *)arg)) {
104 err = -EFAULT; 114 err = -EFAULT;
105 goto setversion_out; 115 goto setversion_out;
@@ -108,19 +118,20 @@ setflags_out:
108 mark_inode_dirty(inode); 118 mark_inode_dirty(inode);
109setversion_out: 119setversion_out:
110 mnt_drop_write(filp->f_path.mnt); 120 mnt_drop_write(filp->f_path.mnt);
111 return err; 121 break;
112 default: 122 default:
113 return -ENOTTY; 123 err = -ENOTTY;
114 } 124 }
125
126 reiserfs_write_unlock(inode->i_sb);
127
128 return err;
115} 129}
116 130
117#ifdef CONFIG_COMPAT 131#ifdef CONFIG_COMPAT
118long reiserfs_compat_ioctl(struct file *file, unsigned int cmd, 132long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
119 unsigned long arg) 133 unsigned long arg)
120{ 134{
121 struct inode *inode = file->f_path.dentry->d_inode;
122 int ret;
123
124 /* These are just misnamed, they actually get/put from/to user an int */ 135 /* These are just misnamed, they actually get/put from/to user an int */
125 switch (cmd) { 136 switch (cmd) {
126 case REISERFS_IOC32_UNPACK: 137 case REISERFS_IOC32_UNPACK:
@@ -141,10 +152,8 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
141 default: 152 default:
142 return -ENOIOCTLCMD; 153 return -ENOIOCTLCMD;
143 } 154 }
144 lock_kernel(); 155
145 ret = reiserfs_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg)); 156 return reiserfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
146 unlock_kernel();
147 return ret;
148} 157}
149#endif 158#endif
150 159
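
The conversion above is the standard BKL-removal recipe for ioctls: .ioctl (inode, file, cmd, arg, called under the BKL) becomes .unlocked_ioctl (file, cmd, arg, with no locking from the caller), the inode is recovered from the file, and each early return is turned into a break so a single exit path can drop the per-superblock lock. Schematically, with illustrative names:

	/* Illustrative shape of a BKL-free ioctl handler after this series. */
	static long my_unlocked_ioctl(struct file *filp, unsigned int cmd,
				      unsigned long arg)
	{
		struct inode *inode = filp->f_path.dentry->d_inode;
		long err = -ENOTTY;

		reiserfs_write_lock(inode->i_sb);	/* replaces the implicit BKL */
		switch (cmd) {
		/* ... each case sets err and breaks instead of returning ... */
		default:
			break;
		}
		reiserfs_write_unlock(inode->i_sb);
		return err;
	}
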
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 90622200b39c..2f8a7e7b8dab 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -429,21 +429,6 @@ static void clear_prepared_bits(struct buffer_head *bh)
429 clear_buffer_journal_restore_dirty(bh); 429 clear_buffer_journal_restore_dirty(bh);
430} 430}
431 431
432/* utility function to force a BUG if it is called without the big
433** kernel lock held. caller is the string printed just before calling BUG()
434*/
435void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
436{
437#ifdef CONFIG_SMP
438 if (current->lock_depth < 0) {
439 reiserfs_panic(sb, "journal-1", "%s called without kernel "
440 "lock held", caller);
441 }
442#else
443 ;
444#endif
445}
446
447/* return a cnode with same dev, block number and size in table, or null if not found */ 432/* return a cnode with same dev, block number and size in table, or null if not found */
448static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct 433static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
449 super_block 434 super_block
@@ -556,7 +541,8 @@ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
556static inline void lock_journal(struct super_block *sb) 541static inline void lock_journal(struct super_block *sb)
557{ 542{
558 PROC_INFO_INC(sb, journal.lock_journal); 543 PROC_INFO_INC(sb, journal.lock_journal);
559 mutex_lock(&SB_JOURNAL(sb)->j_mutex); 544
545 reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
560} 546}
561 547
562/* unlock the current transaction */ 548/* unlock the current transaction */
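
reiserfs_mutex_lock_safe(), used here and in the flush paths below, is defined outside this section; its evident contract is to sleep on the mutex with the write lock dropped, so whoever holds the mutex can still make progress under the write lock. A plausible sketch:

	/* Plausible sketch of reiserfs_mutex_lock_safe() (defined elsewhere). */
	static inline void my_mutex_lock_safe(struct mutex *m, struct super_block *s)
	{
		reiserfs_write_unlock(s);
		mutex_lock(m);
		reiserfs_write_lock(s);
	}
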
@@ -708,7 +694,9 @@ static void check_barrier_completion(struct super_block *s,
708 disable_barrier(s); 694 disable_barrier(s);
709 set_buffer_uptodate(bh); 695 set_buffer_uptodate(bh);
710 set_buffer_dirty(bh); 696 set_buffer_dirty(bh);
697 reiserfs_write_unlock(s);
711 sync_dirty_buffer(bh); 698 sync_dirty_buffer(bh);
699 reiserfs_write_lock(s);
712 } 700 }
713} 701}
714 702
@@ -996,8 +984,13 @@ static int reiserfs_async_progress_wait(struct super_block *s)
996{ 984{
997 DEFINE_WAIT(wait); 985 DEFINE_WAIT(wait);
998 struct reiserfs_journal *j = SB_JOURNAL(s); 986 struct reiserfs_journal *j = SB_JOURNAL(s);
999 if (atomic_read(&j->j_async_throttle)) 987
988 if (atomic_read(&j->j_async_throttle)) {
989 reiserfs_write_unlock(s);
1000 congestion_wait(BLK_RW_ASYNC, HZ / 10); 990 congestion_wait(BLK_RW_ASYNC, HZ / 10);
991 reiserfs_write_lock(s);
992 }
993
1001 return 0; 994 return 0;
1002} 995}
1003 996
@@ -1043,7 +1036,8 @@ static int flush_commit_list(struct super_block *s,
1043 } 1036 }
1044 1037
1045 /* make sure nobody is trying to flush this one at the same time */ 1038 /* make sure nobody is trying to flush this one at the same time */
1046 mutex_lock(&jl->j_commit_mutex); 1039 reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);
1040
1047 if (!journal_list_still_alive(s, trans_id)) { 1041 if (!journal_list_still_alive(s, trans_id)) {
1048 mutex_unlock(&jl->j_commit_mutex); 1042 mutex_unlock(&jl->j_commit_mutex);
1049 goto put_jl; 1043 goto put_jl;
@@ -1061,12 +1055,17 @@ static int flush_commit_list(struct super_block *s,
1061 1055
1062 if (!list_empty(&jl->j_bh_list)) { 1056 if (!list_empty(&jl->j_bh_list)) {
1063 int ret; 1057 int ret;
1064 unlock_kernel(); 1058
1059 /*
1060 * We might sleep in numerous places inside
1061 * write_ordered_buffers. Relax the write lock.
1062 */
1063 reiserfs_write_unlock(s);
1065 ret = write_ordered_buffers(&journal->j_dirty_buffers_lock, 1064 ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
1066 journal, jl, &jl->j_bh_list); 1065 journal, jl, &jl->j_bh_list);
1067 if (ret < 0 && retval == 0) 1066 if (ret < 0 && retval == 0)
1068 retval = ret; 1067 retval = ret;
1069 lock_kernel(); 1068 reiserfs_write_lock(s);
1070 } 1069 }
1071 BUG_ON(!list_empty(&jl->j_bh_list)); 1070 BUG_ON(!list_empty(&jl->j_bh_list));
1072 /* 1071 /*
@@ -1085,8 +1084,11 @@ static int flush_commit_list(struct super_block *s,
1085 SB_ONDISK_JOURNAL_SIZE(s); 1084 SB_ONDISK_JOURNAL_SIZE(s);
1086 tbh = journal_find_get_block(s, bn); 1085 tbh = journal_find_get_block(s, bn);
1087 if (tbh) { 1086 if (tbh) {
1088 if (buffer_dirty(tbh)) 1087 if (buffer_dirty(tbh)) {
1089 ll_rw_block(WRITE, 1, &tbh) ; 1088 reiserfs_write_unlock(s);
1089 ll_rw_block(WRITE, 1, &tbh);
1090 reiserfs_write_lock(s);
1091 }
1090 put_bh(tbh) ; 1092 put_bh(tbh) ;
1091 } 1093 }
1092 } 1094 }
@@ -1114,12 +1116,19 @@ static int flush_commit_list(struct super_block *s,
1114 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + 1116 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
1115 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s); 1117 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
1116 tbh = journal_find_get_block(s, bn); 1118 tbh = journal_find_get_block(s, bn);
1119
1120 reiserfs_write_unlock(s);
1117 wait_on_buffer(tbh); 1121 wait_on_buffer(tbh);
1122 reiserfs_write_lock(s);
1118 // since we're using ll_rw_blk above, it might have skipped over 1123 // since we're using ll_rw_blk above, it might have skipped over
1119 // a locked buffer. Double check here 1124 // a locked buffer. Double check here
1120 // 1125 //
1121 if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */ 1126 /* redundant, sync_dirty_buffer() checks */
1127 if (buffer_dirty(tbh)) {
1128 reiserfs_write_unlock(s);
1122 sync_dirty_buffer(tbh); 1129 sync_dirty_buffer(tbh);
1130 reiserfs_write_lock(s);
1131 }
1123 if (unlikely(!buffer_uptodate(tbh))) { 1132 if (unlikely(!buffer_uptodate(tbh))) {
1124#ifdef CONFIG_REISERFS_CHECK 1133#ifdef CONFIG_REISERFS_CHECK
1125 reiserfs_warning(s, "journal-601", 1134 reiserfs_warning(s, "journal-601",
@@ -1143,10 +1152,15 @@ static int flush_commit_list(struct super_block *s,
1143 if (buffer_dirty(jl->j_commit_bh)) 1152 if (buffer_dirty(jl->j_commit_bh))
1144 BUG(); 1153 BUG();
1145 mark_buffer_dirty(jl->j_commit_bh) ; 1154 mark_buffer_dirty(jl->j_commit_bh) ;
1155 reiserfs_write_unlock(s);
1146 sync_dirty_buffer(jl->j_commit_bh) ; 1156 sync_dirty_buffer(jl->j_commit_bh) ;
1157 reiserfs_write_lock(s);
1147 } 1158 }
1148 } else 1159 } else {
1160 reiserfs_write_unlock(s);
1149 wait_on_buffer(jl->j_commit_bh); 1161 wait_on_buffer(jl->j_commit_bh);
1162 reiserfs_write_lock(s);
1163 }
1150 1164
1151 check_barrier_completion(s, jl->j_commit_bh); 1165 check_barrier_completion(s, jl->j_commit_bh);
1152 1166
@@ -1286,7 +1300,9 @@ static int _update_journal_header_block(struct super_block *sb,
1286 1300
1287 if (trans_id >= journal->j_last_flush_trans_id) { 1301 if (trans_id >= journal->j_last_flush_trans_id) {
1288 if (buffer_locked((journal->j_header_bh))) { 1302 if (buffer_locked((journal->j_header_bh))) {
1303 reiserfs_write_unlock(sb);
1289 wait_on_buffer((journal->j_header_bh)); 1304 wait_on_buffer((journal->j_header_bh));
1305 reiserfs_write_lock(sb);
1290 if (unlikely(!buffer_uptodate(journal->j_header_bh))) { 1306 if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
1291#ifdef CONFIG_REISERFS_CHECK 1307#ifdef CONFIG_REISERFS_CHECK
1292 reiserfs_warning(sb, "journal-699", 1308 reiserfs_warning(sb, "journal-699",
@@ -1312,12 +1328,16 @@ static int _update_journal_header_block(struct super_block *sb,
1312 disable_barrier(sb); 1328 disable_barrier(sb);
1313 goto sync; 1329 goto sync;
1314 } 1330 }
1331 reiserfs_write_unlock(sb);
1315 wait_on_buffer(journal->j_header_bh); 1332 wait_on_buffer(journal->j_header_bh);
1333 reiserfs_write_lock(sb);
1316 check_barrier_completion(sb, journal->j_header_bh); 1334 check_barrier_completion(sb, journal->j_header_bh);
1317 } else { 1335 } else {
1318 sync: 1336 sync:
1319 set_buffer_dirty(journal->j_header_bh); 1337 set_buffer_dirty(journal->j_header_bh);
1338 reiserfs_write_unlock(sb);
1320 sync_dirty_buffer(journal->j_header_bh); 1339 sync_dirty_buffer(journal->j_header_bh);
1340 reiserfs_write_lock(sb);
1321 } 1341 }
1322 if (!buffer_uptodate(journal->j_header_bh)) { 1342 if (!buffer_uptodate(journal->j_header_bh)) {
1323 reiserfs_warning(sb, "journal-837", 1343 reiserfs_warning(sb, "journal-837",
@@ -1409,7 +1429,7 @@ static int flush_journal_list(struct super_block *s,
1409 1429
1410 /* if flushall == 0, the lock is already held */ 1430 /* if flushall == 0, the lock is already held */
1411 if (flushall) { 1431 if (flushall) {
1412 mutex_lock(&journal->j_flush_mutex); 1432 reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1413 } else if (mutex_trylock(&journal->j_flush_mutex)) { 1433 } else if (mutex_trylock(&journal->j_flush_mutex)) {
1414 BUG(); 1434 BUG();
1415 } 1435 }
@@ -1553,7 +1573,11 @@ static int flush_journal_list(struct super_block *s,
1553 reiserfs_panic(s, "journal-1011", 1573 reiserfs_panic(s, "journal-1011",
1554 "cn->bh is NULL"); 1574 "cn->bh is NULL");
1555 } 1575 }
1576
1577 reiserfs_write_unlock(s);
1556 wait_on_buffer(cn->bh); 1578 wait_on_buffer(cn->bh);
1579 reiserfs_write_lock(s);
1580
1557 if (!cn->bh) { 1581 if (!cn->bh) {
1558 reiserfs_panic(s, "journal-1012", 1582 reiserfs_panic(s, "journal-1012",
1559 "cn->bh is NULL"); 1583 "cn->bh is NULL");
@@ -1769,7 +1793,7 @@ static int kupdate_transactions(struct super_block *s,
1769 struct reiserfs_journal *journal = SB_JOURNAL(s); 1793 struct reiserfs_journal *journal = SB_JOURNAL(s);
1770 chunk.nr = 0; 1794 chunk.nr = 0;
1771 1795
1772 mutex_lock(&journal->j_flush_mutex); 1796 reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1773 if (!journal_list_still_alive(s, orig_trans_id)) { 1797 if (!journal_list_still_alive(s, orig_trans_id)) {
1774 goto done; 1798 goto done;
1775 } 1799 }
@@ -1973,11 +1997,19 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1973 reiserfs_mounted_fs_count--; 1997 reiserfs_mounted_fs_count--;
1974 /* wait for all commits to finish */ 1998 /* wait for all commits to finish */
1975 cancel_delayed_work(&SB_JOURNAL(sb)->j_work); 1999 cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
2000
2001 /*
2002 * We must release the write lock here because
2003 * the workqueue job (flush_async_commit) needs this lock
2004 */
2005 reiserfs_write_unlock(sb);
1976 flush_workqueue(commit_wq); 2006 flush_workqueue(commit_wq);
2007
1977 if (!reiserfs_mounted_fs_count) { 2008 if (!reiserfs_mounted_fs_count) {
1978 destroy_workqueue(commit_wq); 2009 destroy_workqueue(commit_wq);
1979 commit_wq = NULL; 2010 commit_wq = NULL;
1980 } 2011 }
2012 reiserfs_write_lock(sb);
1981 2013
1982 free_journal_ram(sb); 2014 free_journal_ram(sb);
1983 2015
@@ -2243,7 +2275,11 @@ static int journal_read_transaction(struct super_block *sb,
2243 /* read in the log blocks, memcpy to the corresponding real block */ 2275 /* read in the log blocks, memcpy to the corresponding real block */
2244 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks); 2276 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2245 for (i = 0; i < get_desc_trans_len(desc); i++) { 2277 for (i = 0; i < get_desc_trans_len(desc); i++) {
2278
2279 reiserfs_write_unlock(sb);
2246 wait_on_buffer(log_blocks[i]); 2280 wait_on_buffer(log_blocks[i]);
2281 reiserfs_write_lock(sb);
2282
2247 if (!buffer_uptodate(log_blocks[i])) { 2283 if (!buffer_uptodate(log_blocks[i])) {
2248 reiserfs_warning(sb, "journal-1212", 2284 reiserfs_warning(sb, "journal-1212",
2249 "REPLAY FAILURE fsck required! " 2285 "REPLAY FAILURE fsck required! "
@@ -2765,11 +2801,27 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
2765 goto free_and_return; 2801 goto free_and_return;
2766 } 2802 }
2767 2803
2804 /*
2805 * We need to unlock here to avoid creating the following
2806 * dependency:
2807 * reiserfs_lock -> sysfs_mutex
2808 * Because the reiserfs mmap path creates the following dependency:
2809 * mm->mmap -> reiserfs_lock, hence we have
2810 * mm->mmap -> reiserfs_lock ->sysfs_mutex
2811 * This would ends up in a circular dependency with sysfs readdir path
2812 * which does sysfs_mutex -> mm->mmap_sem
2813 * This is fine because the reiserfs lock is useless in mount path,
2814 * at least until we call journal_begin. We keep it for paranoid
2815 * reasons.
2816 */
2817 reiserfs_write_unlock(sb);
2768 if (journal_init_dev(sb, journal, j_dev_name) != 0) { 2818 if (journal_init_dev(sb, journal, j_dev_name) != 0) {
2819 reiserfs_write_lock(sb);
2769 reiserfs_warning(sb, "sh-462", 2820 reiserfs_warning(sb, "sh-462",
2770 "unable to initialize jornal device"); 2821 "unable to initialize jornal device");
2771 goto free_and_return; 2822 goto free_and_return;
2772 } 2823 }
2824 reiserfs_write_lock(sb);
2773 2825
2774 rs = SB_DISK_SUPER_BLOCK(sb); 2826 rs = SB_DISK_SUPER_BLOCK(sb);
2775 2827
@@ -2881,8 +2933,11 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
2881 } 2933 }
2882 2934
2883 reiserfs_mounted_fs_count++; 2935 reiserfs_mounted_fs_count++;
2884 if (reiserfs_mounted_fs_count <= 1) 2936 if (reiserfs_mounted_fs_count <= 1) {
2937 reiserfs_write_unlock(sb);
2885 commit_wq = create_workqueue("reiserfs"); 2938 commit_wq = create_workqueue("reiserfs");
2939 reiserfs_write_lock(sb);
2940 }
2886 2941
2887 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); 2942 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2888 journal->j_work_sb = sb; 2943 journal->j_work_sb = sb;
@@ -2964,8 +3019,11 @@ static void queue_log_writer(struct super_block *s)
2964 init_waitqueue_entry(&wait, current); 3019 init_waitqueue_entry(&wait, current);
2965 add_wait_queue(&journal->j_join_wait, &wait); 3020 add_wait_queue(&journal->j_join_wait, &wait);
2966 set_current_state(TASK_UNINTERRUPTIBLE); 3021 set_current_state(TASK_UNINTERRUPTIBLE);
2967 if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) 3022 if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
3023 reiserfs_write_unlock(s);
2968 schedule(); 3024 schedule();
3025 reiserfs_write_lock(s);
3026 }
2969 __set_current_state(TASK_RUNNING); 3027 __set_current_state(TASK_RUNNING);
2970 remove_wait_queue(&journal->j_join_wait, &wait); 3028 remove_wait_queue(&journal->j_join_wait, &wait);
2971} 3029}
@@ -2982,7 +3040,9 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
2982 struct reiserfs_journal *journal = SB_JOURNAL(sb); 3040 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2983 unsigned long bcount = journal->j_bcount; 3041 unsigned long bcount = journal->j_bcount;
2984 while (1) { 3042 while (1) {
3043 reiserfs_write_unlock(sb);
2985 schedule_timeout_uninterruptible(1); 3044 schedule_timeout_uninterruptible(1);
3045 reiserfs_write_lock(sb);
2986 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; 3046 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2987 while ((atomic_read(&journal->j_wcount) > 0 || 3047 while ((atomic_read(&journal->j_wcount) > 0 ||
2988 atomic_read(&journal->j_jlock)) && 3048 atomic_read(&journal->j_jlock)) &&
@@ -3033,7 +3093,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
3033 3093
3034 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) { 3094 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
3035 unlock_journal(sb); 3095 unlock_journal(sb);
3096 reiserfs_write_unlock(sb);
3036 reiserfs_wait_on_write_block(sb); 3097 reiserfs_wait_on_write_block(sb);
3098 reiserfs_write_lock(sb);
3037 PROC_INFO_INC(sb, journal.journal_relock_writers); 3099 PROC_INFO_INC(sb, journal.journal_relock_writers);
3038 goto relock; 3100 goto relock;
3039 } 3101 }
@@ -3506,14 +3568,14 @@ static void flush_async_commits(struct work_struct *work)
3506 struct reiserfs_journal_list *jl; 3568 struct reiserfs_journal_list *jl;
3507 struct list_head *entry; 3569 struct list_head *entry;
3508 3570
3509 lock_kernel(); 3571 reiserfs_write_lock(sb);
3510 if (!list_empty(&journal->j_journal_list)) { 3572 if (!list_empty(&journal->j_journal_list)) {
3511 /* last entry is the youngest, commit it and you get everything */ 3573 /* last entry is the youngest, commit it and you get everything */
3512 entry = journal->j_journal_list.prev; 3574 entry = journal->j_journal_list.prev;
3513 jl = JOURNAL_LIST_ENTRY(entry); 3575 jl = JOURNAL_LIST_ENTRY(entry);
3514 flush_commit_list(sb, jl, 1); 3576 flush_commit_list(sb, jl, 1);
3515 } 3577 }
3516 unlock_kernel(); 3578 reiserfs_write_unlock(sb);
3517} 3579}
3518 3580
3519/* 3581/*
@@ -4041,7 +4103,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4041 * the new transaction is fully setup, and we've already flushed the 4103 * the new transaction is fully setup, and we've already flushed the
4042 * ordered bh list 4104 * ordered bh list
4043 */ 4105 */
4044 mutex_lock(&jl->j_commit_mutex); 4106 reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);
4045 4107
4046 /* save the transaction id in case we need to commit it later */ 4108 /* save the transaction id in case we need to commit it later */
4047 commit_trans_id = jl->j_trans_id; 4109 commit_trans_id = jl->j_trans_id;
@@ -4156,7 +4218,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4156 next = cn->next; 4218 next = cn->next;
4157 free_cnode(sb, cn); 4219 free_cnode(sb, cn);
4158 cn = next; 4220 cn = next;
4221 reiserfs_write_unlock(sb);
4159 cond_resched(); 4222 cond_resched();
4223 reiserfs_write_lock(sb);
4160 } 4224 }
4161 4225
4162 /* we are done with both the c_bh and d_bh, but 4226 /* we are done with both the c_bh and d_bh, but
@@ -4203,10 +4267,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4203 * is lost. 4267 * is lost.
4204 */ 4268 */
4205 if (!list_empty(&jl->j_tail_bh_list)) { 4269 if (!list_empty(&jl->j_tail_bh_list)) {
4206 unlock_kernel(); 4270 reiserfs_write_unlock(sb);
4207 write_ordered_buffers(&journal->j_dirty_buffers_lock, 4271 write_ordered_buffers(&journal->j_dirty_buffers_lock,
4208 journal, jl, &jl->j_tail_bh_list); 4272 journal, jl, &jl->j_tail_bh_list);
4209 lock_kernel(); 4273 reiserfs_write_lock(sb);
4210 } 4274 }
4211 BUG_ON(!list_empty(&jl->j_tail_bh_list)); 4275 BUG_ON(!list_empty(&jl->j_tail_bh_list));
4212 mutex_unlock(&jl->j_commit_mutex); 4276 mutex_unlock(&jl->j_commit_mutex);
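
The journal.c hunks above all apply one idiom: the per-superblock write lock is dropped across every call that can sleep (wait_on_buffer(), schedule(), flush_workqueue(), cond_resched()) and re-acquired afterwards. A minimal sketch of that idiom, assuming only the reiserfs_write_lock()/reiserfs_write_unlock() helpers introduced in fs/reiserfs/lock.c below (the wrapper function itself is hypothetical):

	/*
	 * Sketch of the relinquish-across-blocking-calls idiom. The write
	 * lock must not be held while sleeping: the task that would wake us
	 * may itself need the lock, and the old BKL semantics (implicit
	 * release on schedule()) are otherwise lost.
	 */
	static void wait_for_buffer_locked(struct super_block *sb,
					   struct buffer_head *bh)
	{
		reiserfs_write_unlock(sb);	/* let other lock users progress */
		wait_on_buffer(bh);		/* may sleep until I/O completes */
		reiserfs_write_lock(sb);	/* re-take before touching fs state */
	}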
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
new file mode 100644
index 000000000000..ee2cfc0fd8a7
--- /dev/null
+++ b/fs/reiserfs/lock.c
@@ -0,0 +1,88 @@
1#include <linux/reiserfs_fs.h>
2#include <linux/mutex.h>
3
4/*
5 * The previous reiserfs locking scheme was heavily based on
6 * the tricky properties of the Bkl:
7 *
 8 * - it was acquired recursively by the same task
 9 * - performance relied on its release-while-schedule() property
 10 *
 11 * Now that we replace it with a mutex, we still want to keep the same
 12 * recursive property to avoid big changes in the code structure.
 13 * We use our own lock_owner here because the owner field of a mutex
 14 * is only available in SMP or with mutex debugging; also, we only need
 15 * this field for this mutex, so there is no need for a system-wide one.
 16 *
 17 * Also, this lock is often released before a call that could block,
 18 * because reiserfs performance was partially based on the
 19 * release-while-schedule() property of the Bkl.
20 */
21void reiserfs_write_lock(struct super_block *s)
22{
23 struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
24
25 if (sb_i->lock_owner != current) {
26 mutex_lock(&sb_i->lock);
27 sb_i->lock_owner = current;
28 }
29
30 /* No need to protect it, only the current task touches it */
31 sb_i->lock_depth++;
32}
33
34void reiserfs_write_unlock(struct super_block *s)
35{
36 struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
37
38 /*
39 * Are we unlocking without even holding the lock?
40 * Such a situation must raise a BUG() if we don't want
41 * to corrupt the data.
42 */
43 BUG_ON(sb_i->lock_owner != current);
44
45 if (--sb_i->lock_depth == -1) {
46 sb_i->lock_owner = NULL;
47 mutex_unlock(&sb_i->lock);
48 }
49}
50
51/*
52 * If we already own the lock, just exit and don't increase the depth.
53 * Useful when we don't want to lock more than once.
54 *
55 * We always return the lock_depth we had before calling
56 * this function.
57 */
58int reiserfs_write_lock_once(struct super_block *s)
59{
60 struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
61
62 if (sb_i->lock_owner != current) {
63 mutex_lock(&sb_i->lock);
64 sb_i->lock_owner = current;
65 return sb_i->lock_depth++;
66 }
67
68 return sb_i->lock_depth;
69}
70
71void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
72{
73 if (lock_depth == -1)
74 reiserfs_write_unlock(s);
75}
76
77/*
78 * Utility function to force a BUG if it is called without the superblock
79 * write lock held. caller is the string printed just before calling BUG()
80 */
81void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
82{
83 struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
84
85 if (sb_i->lock_depth < 0)
 86		reiserfs_panic(sb, "%s called without the write lock held",
 87			       caller);
88}
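
The *_once variants above exist for functions that may be entered either with or without the write lock already held: reiserfs_write_lock_once() returns the depth as it was before the call (-1 if this call actually took the mutex), and reiserfs_write_unlock_once() releases the mutex only in that case. A usage sketch; do_some_fs_work() is a hypothetical placeholder:

	static int example_callable_from_anywhere(struct super_block *sb)
	{
		/* -1 here means this call took the lock; >= 0 means recursion. */
		int lock_depth = reiserfs_write_lock_once(sb);
		int err;

		err = do_some_fs_work(sb);	/* hypothetical helper */

		/* Unlocks only when lock_depth == -1, i.e. we were the taker. */
		reiserfs_write_unlock_once(sb, lock_depth);
		return err;
	}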
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 271579128634..e296ff72a6cc 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -324,6 +324,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
324 struct nameidata *nd) 324 struct nameidata *nd)
325{ 325{
326 int retval; 326 int retval;
327 int lock_depth;
327 struct inode *inode = NULL; 328 struct inode *inode = NULL;
328 struct reiserfs_dir_entry de; 329 struct reiserfs_dir_entry de;
329 INITIALIZE_PATH(path_to_entry); 330 INITIALIZE_PATH(path_to_entry);
@@ -331,7 +332,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
331 if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len) 332 if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
332 return ERR_PTR(-ENAMETOOLONG); 333 return ERR_PTR(-ENAMETOOLONG);
333 334
334 reiserfs_write_lock(dir->i_sb); 335 /*
336 * Might be called with or without the write lock, must be careful
337 * to not recursively hold it in case we want to release the lock
338 * before rescheduling.
339 */
340 lock_depth = reiserfs_write_lock_once(dir->i_sb);
341
335 de.de_gen_number_bit_string = NULL; 342 de.de_gen_number_bit_string = NULL;
336 retval = 343 retval =
337 reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, 344 reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len,
@@ -341,7 +348,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
341 inode = reiserfs_iget(dir->i_sb, 348 inode = reiserfs_iget(dir->i_sb,
342 (struct cpu_key *)&(de.de_dir_id)); 349 (struct cpu_key *)&(de.de_dir_id));
343 if (!inode || IS_ERR(inode)) { 350 if (!inode || IS_ERR(inode)) {
344 reiserfs_write_unlock(dir->i_sb); 351 reiserfs_write_unlock_once(dir->i_sb, lock_depth);
345 return ERR_PTR(-EACCES); 352 return ERR_PTR(-EACCES);
346 } 353 }
347 354
@@ -350,7 +357,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
350 if (IS_PRIVATE(dir)) 357 if (IS_PRIVATE(dir))
351 inode->i_flags |= S_PRIVATE; 358 inode->i_flags |= S_PRIVATE;
352 } 359 }
353 reiserfs_write_unlock(dir->i_sb); 360 reiserfs_write_unlock_once(dir->i_sb, lock_depth);
354 if (retval == IO_ERROR) { 361 if (retval == IO_ERROR) {
355 return ERR_PTR(-EIO); 362 return ERR_PTR(-EIO);
356 } 363 }
@@ -725,6 +732,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
725 struct inode *inode; 732 struct inode *inode;
726 struct reiserfs_transaction_handle th; 733 struct reiserfs_transaction_handle th;
727 struct reiserfs_security_handle security; 734 struct reiserfs_security_handle security;
735 int lock_depth;
728 /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */ 736 /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
729 int jbegin_count = 737 int jbegin_count =
730 JOURNAL_PER_BALANCE_CNT * 3 + 738 JOURNAL_PER_BALANCE_CNT * 3 +
@@ -748,7 +756,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
748 return retval; 756 return retval;
749 } 757 }
750 jbegin_count += retval; 758 jbegin_count += retval;
751 reiserfs_write_lock(dir->i_sb); 759 lock_depth = reiserfs_write_lock_once(dir->i_sb);
752 760
753 retval = journal_begin(&th, dir->i_sb, jbegin_count); 761 retval = journal_begin(&th, dir->i_sb, jbegin_count);
754 if (retval) { 762 if (retval) {
@@ -798,8 +806,8 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
798 d_instantiate(dentry, inode); 806 d_instantiate(dentry, inode);
799 unlock_new_inode(inode); 807 unlock_new_inode(inode);
800 retval = journal_end(&th, dir->i_sb, jbegin_count); 808 retval = journal_end(&th, dir->i_sb, jbegin_count);
801 out_failed: 809out_failed:
802 reiserfs_write_unlock(dir->i_sb); 810 reiserfs_write_unlock_once(dir->i_sb, lock_depth);
803 return retval; 811 return retval;
804} 812}
805 813
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 536eacaeb710..adbc6f538515 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -349,10 +349,6 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
349 349
350 . */ 350 . */
351 351
352#ifdef CONFIG_REISERFS_CHECK
353extern struct tree_balance *cur_tb;
354#endif
355
356void __reiserfs_panic(struct super_block *sb, const char *id, 352void __reiserfs_panic(struct super_block *sb, const char *id,
357 const char *function, const char *fmt, ...) 353 const char *function, const char *fmt, ...)
358{ 354{
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 18b315d3d104..b3a94d20f0fc 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -141,7 +141,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
141 141
142 set_buffer_uptodate(bh); 142 set_buffer_uptodate(bh);
143 mark_buffer_dirty(bh); 143 mark_buffer_dirty(bh);
144 reiserfs_write_unlock(s);
144 sync_dirty_buffer(bh); 145 sync_dirty_buffer(bh);
146 reiserfs_write_lock(s);
145 // update bitmap_info stuff 147 // update bitmap_info stuff
146 bitmap[i].free_count = sb_blocksize(sb) * 8 - 1; 148 bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
147 brelse(bh); 149 brelse(bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index d036ee5b1c81..5fa7118f04e1 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -222,9 +222,6 @@ static inline int bin_search(const void *key, /* Key to search for. */
222 return ITEM_NOT_FOUND; 222 return ITEM_NOT_FOUND;
223} 223}
224 224
225#ifdef CONFIG_REISERFS_CHECK
226extern struct tree_balance *cur_tb;
227#endif
228 225
229/* Minimal possible key. It is never in the tree. */ 226/* Minimal possible key. It is never in the tree. */
230const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} }; 227const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };
@@ -519,25 +516,48 @@ static int is_tree_node(struct buffer_head *bh, int level)
519 516
520#define SEARCH_BY_KEY_READA 16 517#define SEARCH_BY_KEY_READA 16
521 518
522/* The function is NOT SCHEDULE-SAFE! */ 519/*
523static void search_by_key_reada(struct super_block *s, 520 * The function is NOT SCHEDULE-SAFE!
 521 * It might drop the write lock if it needs to wait for a block
 522 * to be read. Note that in this case it does not re-acquire the lock,
 523 * to avoid the high contention that too many lock requests would cause,
 524 * especially since the caller (search_by_key) will perform other
 525 * schedule-unsafe operations just after calling this function.
526 *
527 * @return true if we have unlocked
528 */
529static bool search_by_key_reada(struct super_block *s,
524 struct buffer_head **bh, 530 struct buffer_head **bh,
525 b_blocknr_t *b, int num) 531 b_blocknr_t *b, int num)
526{ 532{
527 int i, j; 533 int i, j;
534 bool unlocked = false;
528 535
529 for (i = 0; i < num; i++) { 536 for (i = 0; i < num; i++) {
530 bh[i] = sb_getblk(s, b[i]); 537 bh[i] = sb_getblk(s, b[i]);
531 } 538 }
539 /*
540 * We are going to read some blocks on which we
 541 * have a reference, so this is safe. We might be reading
 542 * blocks that are concurrently modified once we release
 543 * the lock, but that's still fine because we check later
 544 * whether the tree has changed.
545 */
532 for (j = 0; j < i; j++) { 546 for (j = 0; j < i; j++) {
533 /* 547 /*
534 * note, this needs attention if we are getting rid of the BKL 548 * note, this needs attention if we are getting rid of the BKL
535 * you have to make sure the prepared bit isn't set on this buffer 549 * you have to make sure the prepared bit isn't set on this buffer
536 */ 550 */
537 if (!buffer_uptodate(bh[j])) 551 if (!buffer_uptodate(bh[j])) {
552 if (!unlocked) {
553 reiserfs_write_unlock(s);
554 unlocked = true;
555 }
538 ll_rw_block(READA, 1, bh + j); 556 ll_rw_block(READA, 1, bh + j);
557 }
539 brelse(bh[j]); 558 brelse(bh[j]);
540 } 559 }
560 return unlocked;
541} 561}
542 562
543/************************************************************************** 563/**************************************************************************
@@ -625,11 +645,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
625 have a pointer to it. */ 645 have a pointer to it. */
626 if ((bh = last_element->pe_buffer = 646 if ((bh = last_element->pe_buffer =
627 sb_getblk(sb, block_number))) { 647 sb_getblk(sb, block_number))) {
648 bool unlocked = false;
649
628 if (!buffer_uptodate(bh) && reada_count > 1) 650 if (!buffer_uptodate(bh) && reada_count > 1)
629 search_by_key_reada(sb, reada_bh, 651 /* may unlock the write lock */
652 unlocked = search_by_key_reada(sb, reada_bh,
630 reada_blocks, reada_count); 653 reada_blocks, reada_count);
654 /*
655 * If we haven't already unlocked the write lock,
656 * then we need to do that here before reading
657 * the current block
658 */
659 if (!buffer_uptodate(bh) && !unlocked) {
660 reiserfs_write_unlock(sb);
661 unlocked = true;
662 }
631 ll_rw_block(READ, 1, &bh); 663 ll_rw_block(READ, 1, &bh);
632 wait_on_buffer(bh); 664 wait_on_buffer(bh);
665
666 if (unlocked)
667 reiserfs_write_lock(sb);
633 if (!buffer_uptodate(bh)) 668 if (!buffer_uptodate(bh))
634 goto io_error; 669 goto io_error;
635 } else { 670 } else {
@@ -673,7 +708,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
673 !key_in_buffer(search_path, key, sb), 708 !key_in_buffer(search_path, key, sb),
674 "PAP-5130: key is not in the buffer"); 709 "PAP-5130: key is not in the buffer");
675#ifdef CONFIG_REISERFS_CHECK 710#ifdef CONFIG_REISERFS_CHECK
676 if (cur_tb) { 711 if (REISERFS_SB(sb)->cur_tb) {
677 print_cur_tb("5140"); 712 print_cur_tb("5140");
678 reiserfs_panic(sb, "PAP-5140", 713 reiserfs_panic(sb, "PAP-5140",
679 "schedule occurred in do_balance!"); 714 "schedule occurred in do_balance!");
@@ -1024,7 +1059,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
1024 reiserfs_free_block(th, inode, block, 1); 1059 reiserfs_free_block(th, inode, block, 1);
1025 } 1060 }
1026 1061
1062 reiserfs_write_unlock(sb);
1027 cond_resched(); 1063 cond_resched();
1064 reiserfs_write_lock(sb);
1028 1065
1029 if (item_moved (&s_ih, path)) { 1066 if (item_moved (&s_ih, path)) {
1030 need_re_search = 1; 1067 need_re_search = 1;
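
Dropping the write lock around cond_resched() in prepare_for_delete_or_cut() means the tree can be rebalanced underneath us, which is why the existing item_moved() check right after the relock matters. The same revalidate-after-relock shape applies wherever the lock is dropped; a sketch under those assumptions (checked_resched() is a hypothetical helper, item_moved() is the existing stree.c check):

	/*
	 * Sketch: any window where the write lock was dropped must be
	 * followed by revalidation before the cached path is trusted.
	 */
	static int checked_resched(struct super_block *sb,
				   struct item_head *ih,
				   struct treepath *path)
	{
		reiserfs_write_unlock(sb);
		cond_resched();			/* balancing may run here */
		reiserfs_write_lock(sb);

		/* Non-zero tells the caller to restart its lookup. */
		return item_moved(ih, path);
	}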
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f0ad05f38022..339b0baf2af6 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -465,7 +465,7 @@ static void reiserfs_put_super(struct super_block *s)
465 struct reiserfs_transaction_handle th; 465 struct reiserfs_transaction_handle th;
466 th.t_trans_id = 0; 466 th.t_trans_id = 0;
467 467
468 lock_kernel(); 468 reiserfs_write_lock(s);
469 469
470 if (s->s_dirt) 470 if (s->s_dirt)
471 reiserfs_write_super(s); 471 reiserfs_write_super(s);
@@ -499,10 +499,10 @@ static void reiserfs_put_super(struct super_block *s)
499 499
500 reiserfs_proc_info_done(s); 500 reiserfs_proc_info_done(s);
501 501
502 reiserfs_write_unlock(s);
503 mutex_destroy(&REISERFS_SB(s)->lock);
502 kfree(s->s_fs_info); 504 kfree(s->s_fs_info);
503 s->s_fs_info = NULL; 505 s->s_fs_info = NULL;
504
505 unlock_kernel();
506} 506}
507 507
508static struct kmem_cache *reiserfs_inode_cachep; 508static struct kmem_cache *reiserfs_inode_cachep;
@@ -554,25 +554,28 @@ static void reiserfs_dirty_inode(struct inode *inode)
554 struct reiserfs_transaction_handle th; 554 struct reiserfs_transaction_handle th;
555 555
556 int err = 0; 556 int err = 0;
557 int lock_depth;
558
557 if (inode->i_sb->s_flags & MS_RDONLY) { 559 if (inode->i_sb->s_flags & MS_RDONLY) {
558 reiserfs_warning(inode->i_sb, "clm-6006", 560 reiserfs_warning(inode->i_sb, "clm-6006",
559 "writing inode %lu on readonly FS", 561 "writing inode %lu on readonly FS",
560 inode->i_ino); 562 inode->i_ino);
561 return; 563 return;
562 } 564 }
563 reiserfs_write_lock(inode->i_sb); 565 lock_depth = reiserfs_write_lock_once(inode->i_sb);
564 566
565 /* this is really only used for atime updates, so they don't have 567 /* this is really only used for atime updates, so they don't have
566 ** to be included in O_SYNC or fsync 568 ** to be included in O_SYNC or fsync
567 */ 569 */
568 err = journal_begin(&th, inode->i_sb, 1); 570 err = journal_begin(&th, inode->i_sb, 1);
569 if (err) { 571 if (err)
570 reiserfs_write_unlock(inode->i_sb); 572 goto out;
571 return; 573
572 }
573 reiserfs_update_sd(&th, inode); 574 reiserfs_update_sd(&th, inode);
574 journal_end(&th, inode->i_sb, 1); 575 journal_end(&th, inode->i_sb, 1);
575 reiserfs_write_unlock(inode->i_sb); 576
577out:
578 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
576} 579}
577 580
578#ifdef CONFIG_QUOTA 581#ifdef CONFIG_QUOTA
@@ -1168,11 +1171,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1168 unsigned int qfmt = 0; 1171 unsigned int qfmt = 0;
1169#ifdef CONFIG_QUOTA 1172#ifdef CONFIG_QUOTA
1170 int i; 1173 int i;
1174#endif
1175
1176 reiserfs_write_lock(s);
1171 1177
1178#ifdef CONFIG_QUOTA
1172 memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names)); 1179 memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names));
1173#endif 1180#endif
1174 1181
1175 lock_kernel();
1176 rs = SB_DISK_SUPER_BLOCK(s); 1182 rs = SB_DISK_SUPER_BLOCK(s);
1177 1183
1178 if (!reiserfs_parse_options 1184 if (!reiserfs_parse_options
@@ -1295,12 +1301,12 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1295 1301
1296out_ok: 1302out_ok:
1297 replace_mount_options(s, new_opts); 1303 replace_mount_options(s, new_opts);
1298 unlock_kernel(); 1304 reiserfs_write_unlock(s);
1299 return 0; 1305 return 0;
1300 1306
1301out_err: 1307out_err:
1302 kfree(new_opts); 1308 kfree(new_opts);
1303 unlock_kernel(); 1309 reiserfs_write_unlock(s);
1304 return err; 1310 return err;
1305} 1311}
1306 1312
@@ -1404,7 +1410,9 @@ static int read_super_block(struct super_block *s, int offset)
1404static int reread_meta_blocks(struct super_block *s) 1410static int reread_meta_blocks(struct super_block *s)
1405{ 1411{
1406 ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s))); 1412 ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
1413 reiserfs_write_unlock(s);
1407 wait_on_buffer(SB_BUFFER_WITH_SB(s)); 1414 wait_on_buffer(SB_BUFFER_WITH_SB(s));
1415 reiserfs_write_lock(s);
1408 if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) { 1416 if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
1409 reiserfs_warning(s, "reiserfs-2504", "error reading the super"); 1417 reiserfs_warning(s, "reiserfs-2504", "error reading the super");
1410 return 1; 1418 return 1;
@@ -1613,7 +1621,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1613 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); 1621 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
1614 if (!sbi) { 1622 if (!sbi) {
1615 errval = -ENOMEM; 1623 errval = -ENOMEM;
1616 goto error; 1624 goto error_alloc;
1617 } 1625 }
1618 s->s_fs_info = sbi; 1626 s->s_fs_info = sbi;
1619 /* Set default values for options: non-aggressive tails, RO on errors */ 1627 /* Set default values for options: non-aggressive tails, RO on errors */
@@ -1627,6 +1635,20 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1627 /* setup default block allocator options */ 1635 /* setup default block allocator options */
1628 reiserfs_init_alloc_options(s); 1636 reiserfs_init_alloc_options(s);
1629 1637
1638 mutex_init(&REISERFS_SB(s)->lock);
1639 REISERFS_SB(s)->lock_depth = -1;
1640
1641 /*
 1642	 * This function is called with the bkl, which was also the old
 1643	 * locking scheme used here.
 1644	 * do_journal_begin() will soon check that we hold the lock (i.e. what
 1645	 * used to be the bkl), likely because do_journal_begin() has several
 1646	 * other callers and, at this point, it doesn't seem necessary to
 1647	 * protect against anything.
 1648	 * Anyway, let's be conservative and take the lock for now.
1649 */
1650 reiserfs_write_lock(s);
1651
1630 jdev_name = NULL; 1652 jdev_name = NULL;
1631 if (reiserfs_parse_options 1653 if (reiserfs_parse_options
1632 (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name, 1654 (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
@@ -1852,9 +1874,13 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1852 init_waitqueue_head(&(sbi->s_wait)); 1874 init_waitqueue_head(&(sbi->s_wait));
1853 spin_lock_init(&sbi->bitmap_lock); 1875 spin_lock_init(&sbi->bitmap_lock);
1854 1876
1877 reiserfs_write_unlock(s);
1878
1855 return (0); 1879 return (0);
1856 1880
1857error: 1881error:
1882 reiserfs_write_unlock(s);
1883error_alloc:
1858 if (jinit_done) { /* kill the commit thread, free journal ram */ 1884 if (jinit_done) { /* kill the commit thread, free journal ram */
1859 journal_release_error(NULL, s); 1885 journal_release_error(NULL, s);
1860 } 1886 }
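
Taken together, the super.c hunks define the lock's lifecycle on the mount path; reiserfs_put_super() later takes it, releases it, and mutex_destroy()s it on unmount. A condensed sketch of what reiserfs_fill_super() now does with the lock and lock_depth fields added to reiserfs_sb_info:

	/* Condensed from the hunks above: write-lock lifecycle over a mount. */
	mutex_init(&REISERFS_SB(s)->lock);
	REISERFS_SB(s)->lock_depth = -1;	/* -1 means "not held" */

	reiserfs_write_lock(s);	/* conservative: stands in for the old bkl */
	/* ... option parsing, journal_init(), tree root lookup ... */
	reiserfs_write_unlock(s);	/* on both the success and error paths */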
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 6925b835a43b..58aa8e75f7f5 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -975,7 +975,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
975 int err = 0; 975 int err = 0;
976 976
977 /* If we don't have the privroot located yet - go find it */ 977 /* If we don't have the privroot located yet - go find it */
978 mutex_lock(&s->s_root->d_inode->i_mutex); 978 reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
979 dentry = lookup_one_len(PRIVROOT_NAME, s->s_root, 979 dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
980 strlen(PRIVROOT_NAME)); 980 strlen(PRIVROOT_NAME));
981 if (!IS_ERR(dentry)) { 981 if (!IS_ERR(dentry)) {
@@ -1004,14 +1004,14 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
1004 goto error; 1004 goto error;
1005 1005
1006 if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) { 1006 if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
1007 mutex_lock(&s->s_root->d_inode->i_mutex); 1007 reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
1008 err = create_privroot(REISERFS_SB(s)->priv_root); 1008 err = create_privroot(REISERFS_SB(s)->priv_root);
1009 mutex_unlock(&s->s_root->d_inode->i_mutex); 1009 mutex_unlock(&s->s_root->d_inode->i_mutex);
1010 } 1010 }
1011 1011
1012 if (privroot->d_inode) { 1012 if (privroot->d_inode) {
1013 s->s_xattr = reiserfs_xattr_handlers; 1013 s->s_xattr = reiserfs_xattr_handlers;
1014 mutex_lock(&privroot->d_inode->i_mutex); 1014 reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
1015 if (!REISERFS_SB(s)->xattr_root) { 1015 if (!REISERFS_SB(s)->xattr_root) {
1016 struct dentry *dentry; 1016 struct dentry *dentry;
1017 dentry = lookup_one_len(XAROOT_NAME, privroot, 1017 dentry = lookup_one_len(XAROOT_NAME, privroot,
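
reiserfs_mutex_lock_safe(), used above in journal.c and here for i_mutex, is not shown in this diff (it lives in the reiserfs header). A sketch consistent with its call sites, which avoids taking a mutex while holding the write lock and thereby sidesteps an ABBA deadlock between the two:

	/*
	 * Sketch, assuming the helper matches its uses in this series:
	 * drop the write lock, take the target mutex, re-take the write
	 * lock. This keeps the ordering "mutex before write lock", so it
	 * cannot deadlock against a task that holds the mutex and then
	 * wants the write lock.
	 */
	static inline void
	reiserfs_mutex_lock_safe(struct mutex *m, struct super_block *s)
	{
		reiserfs_write_unlock(s);
		mutex_lock(m);
		reiserfs_write_lock(s);
	}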
diff --git a/fs/splice.c b/fs/splice.c
index 7394e9e17534..39208663aaf1 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -648,9 +648,11 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
648 ret = buf->ops->confirm(pipe, buf); 648 ret = buf->ops->confirm(pipe, buf);
649 if (!ret) { 649 if (!ret) {
650 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; 650 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
651 651 if (file->f_op && file->f_op->sendpage)
652 ret = file->f_op->sendpage(file, buf->page, buf->offset, 652 ret = file->f_op->sendpage(file, buf->page, buf->offset,
653 sd->len, &pos, more); 653 sd->len, &pos, more);
654 else
655 ret = -EINVAL;
654 } 656 }
655 657
656 return ret; 658 return ret;
@@ -1068,8 +1070,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
1068 if (unlikely(ret < 0)) 1070 if (unlikely(ret < 0))
1069 return ret; 1071 return ret;
1070 1072
1071 splice_write = out->f_op->splice_write; 1073 if (out->f_op && out->f_op->splice_write)
1072 if (!splice_write) 1074 splice_write = out->f_op->splice_write;
1075 else
1073 splice_write = default_file_splice_write; 1076 splice_write = default_file_splice_write;
1074 1077
1075 return splice_write(pipe, out, ppos, len, flags); 1078 return splice_write(pipe, out, ppos, len, flags);
@@ -1093,8 +1096,9 @@ static long do_splice_to(struct file *in, loff_t *ppos,
1093 if (unlikely(ret < 0)) 1096 if (unlikely(ret < 0))
1094 return ret; 1097 return ret;
1095 1098
1096 splice_read = in->f_op->splice_read; 1099 if (in->f_op && in->f_op->splice_read)
1097 if (!splice_read) 1100 splice_read = in->f_op->splice_read;
1101 else
1098 splice_read = default_file_splice_read; 1102 splice_read = default_file_splice_read;
1099 1103
1100 return splice_read(in, ppos, pipe, len, flags); 1104 return splice_read(in, ppos, pipe, len, flags);
@@ -1316,7 +1320,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1316 if (off_in) 1320 if (off_in)
1317 return -ESPIPE; 1321 return -ESPIPE;
1318 if (off_out) { 1322 if (off_out) {
1319 if (out->f_op->llseek == no_llseek) 1323 if (!out->f_op || !out->f_op->llseek ||
1324 out->f_op->llseek == no_llseek)
1320 return -EINVAL; 1325 return -EINVAL;
1321 if (copy_from_user(&offset, off_out, sizeof(loff_t))) 1326 if (copy_from_user(&offset, off_out, sizeof(loff_t)))
1322 return -EFAULT; 1327 return -EFAULT;
@@ -1336,7 +1341,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1336 if (off_out) 1341 if (off_out)
1337 return -ESPIPE; 1342 return -ESPIPE;
1338 if (off_in) { 1343 if (off_in) {
1339 if (in->f_op->llseek == no_llseek) 1344 if (!in->f_op || !in->f_op->llseek ||
1345 in->f_op->llseek == no_llseek)
1340 return -EINVAL; 1346 return -EINVAL;
1341 if (copy_from_user(&offset, off_in, sizeof(loff_t))) 1347 if (copy_from_user(&offset, off_in, sizeof(loff_t)))
1342 return -EFAULT; 1348 return -EFAULT;
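
The splice.c hunks all apply one defensive rule: never dereference a file_operations method without first checking both the ops table and the method pointer, and pick an explicit fallback (a default implementation or -EINVAL) when either is missing. A sketch of the guarded dispatch for splice_write (the wrapper name is hypothetical; default_file_splice_write is the existing generic fallback):

	static ssize_t call_splice_write(struct pipe_inode_info *pipe,
					 struct file *out, loff_t *ppos,
					 size_t len, unsigned int flags)
	{
		ssize_t (*op)(struct pipe_inode_info *, struct file *,
			      loff_t *, size_t, unsigned int);

		/* f_op itself may be NULL for some special files. */
		if (out->f_op && out->f_op->splice_write)
			op = out->f_op->splice_write;
		else
			op = default_file_splice_write;	/* generic fallback */

		return op(pipe, out, ppos, len, flags);
	}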
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index f94ddf7efba0..868a55ee080f 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -23,7 +23,7 @@
23/* 23/*
24 * This file implements functions needed to recover from unclean un-mounts. 24 * This file implements functions needed to recover from unclean un-mounts.
25 * When UBIFS is mounted, it checks a flag on the master node to determine if 25 * When UBIFS is mounted, it checks a flag on the master node to determine if
26 * an un-mount was completed sucessfully. If not, the process of mounting 26 * an un-mount was completed successfully. If not, the process of mounting
27 * incorporates additional checking and fixing of on-flash data structures. 27 * incorporates additional checking and fixing of on-flash data structures.
28 * UBIFS always cleans away all remnants of an unclean un-mount, so that 28 * UBIFS always cleans away all remnants of an unclean un-mount, so that
29 * errors do not accumulate. However UBIFS defers recovery if it is mounted 29 * errors do not accumulate. However UBIFS defers recovery if it is mounted
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
index c6ad7c7e3ee9..05ac0fe9c4d3 100644
--- a/fs/xattr_acl.c
+++ b/fs/xattr_acl.c
@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size)
36 if (count == 0) 36 if (count == 0)
37 return NULL; 37 return NULL;
38 38
39 acl = posix_acl_alloc(count, GFP_KERNEL); 39 acl = posix_acl_alloc(count, GFP_NOFS);
40 if (!acl) 40 if (!acl)
41 return ERR_PTR(-ENOMEM); 41 return ERR_PTR(-ENOMEM);
42 acl_e = acl->a_entries; 42 acl_e = acl->a_entries;
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c2e30eea74dc..70f989895d15 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -904,16 +904,9 @@ xfs_convert_page(
904 904
905 if (startio) { 905 if (startio) {
906 if (count) { 906 if (count) {
907 struct backing_dev_info *bdi;
908
909 bdi = inode->i_mapping->backing_dev_info;
910 wbc->nr_to_write--; 907 wbc->nr_to_write--;
911 if (bdi_write_congested(bdi)) { 908 if (wbc->nr_to_write <= 0)
912 wbc->encountered_congestion = 1;
913 done = 1;
914 } else if (wbc->nr_to_write <= 0) {
915 done = 1; 909 done = 1;
916 }
917 } 910 }
918 xfs_start_page_writeback(page, !page_dirty, count); 911 xfs_start_page_writeback(page, !page_dirty, count);
919 } 912 }
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index c5bc67c4e3bb..7bb5092d6ae4 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -55,170 +55,140 @@ xfs_stats_clear_proc_handler(
55 55
56static ctl_table xfs_table[] = { 56static ctl_table xfs_table[] = {
57 { 57 {
58 .ctl_name = XFS_SGID_INHERIT,
59 .procname = "irix_sgid_inherit", 58 .procname = "irix_sgid_inherit",
60 .data = &xfs_params.sgid_inherit.val, 59 .data = &xfs_params.sgid_inherit.val,
61 .maxlen = sizeof(int), 60 .maxlen = sizeof(int),
62 .mode = 0644, 61 .mode = 0644,
63 .proc_handler = &proc_dointvec_minmax, 62 .proc_handler = proc_dointvec_minmax,
64 .strategy = &sysctl_intvec,
65 .extra1 = &xfs_params.sgid_inherit.min, 63 .extra1 = &xfs_params.sgid_inherit.min,
66 .extra2 = &xfs_params.sgid_inherit.max 64 .extra2 = &xfs_params.sgid_inherit.max
67 }, 65 },
68 { 66 {
69 .ctl_name = XFS_SYMLINK_MODE,
70 .procname = "irix_symlink_mode", 67 .procname = "irix_symlink_mode",
71 .data = &xfs_params.symlink_mode.val, 68 .data = &xfs_params.symlink_mode.val,
72 .maxlen = sizeof(int), 69 .maxlen = sizeof(int),
73 .mode = 0644, 70 .mode = 0644,
74 .proc_handler = &proc_dointvec_minmax, 71 .proc_handler = proc_dointvec_minmax,
75 .strategy = &sysctl_intvec,
76 .extra1 = &xfs_params.symlink_mode.min, 72 .extra1 = &xfs_params.symlink_mode.min,
77 .extra2 = &xfs_params.symlink_mode.max 73 .extra2 = &xfs_params.symlink_mode.max
78 }, 74 },
79 { 75 {
80 .ctl_name = XFS_PANIC_MASK,
81 .procname = "panic_mask", 76 .procname = "panic_mask",
82 .data = &xfs_params.panic_mask.val, 77 .data = &xfs_params.panic_mask.val,
83 .maxlen = sizeof(int), 78 .maxlen = sizeof(int),
84 .mode = 0644, 79 .mode = 0644,
85 .proc_handler = &proc_dointvec_minmax, 80 .proc_handler = proc_dointvec_minmax,
86 .strategy = &sysctl_intvec,
87 .extra1 = &xfs_params.panic_mask.min, 81 .extra1 = &xfs_params.panic_mask.min,
88 .extra2 = &xfs_params.panic_mask.max 82 .extra2 = &xfs_params.panic_mask.max
89 }, 83 },
90 84
91 { 85 {
92 .ctl_name = XFS_ERRLEVEL,
93 .procname = "error_level", 86 .procname = "error_level",
94 .data = &xfs_params.error_level.val, 87 .data = &xfs_params.error_level.val,
95 .maxlen = sizeof(int), 88 .maxlen = sizeof(int),
96 .mode = 0644, 89 .mode = 0644,
97 .proc_handler = &proc_dointvec_minmax, 90 .proc_handler = proc_dointvec_minmax,
98 .strategy = &sysctl_intvec,
99 .extra1 = &xfs_params.error_level.min, 91 .extra1 = &xfs_params.error_level.min,
100 .extra2 = &xfs_params.error_level.max 92 .extra2 = &xfs_params.error_level.max
101 }, 93 },
102 { 94 {
103 .ctl_name = XFS_SYNCD_TIMER,
104 .procname = "xfssyncd_centisecs", 95 .procname = "xfssyncd_centisecs",
105 .data = &xfs_params.syncd_timer.val, 96 .data = &xfs_params.syncd_timer.val,
106 .maxlen = sizeof(int), 97 .maxlen = sizeof(int),
107 .mode = 0644, 98 .mode = 0644,
108 .proc_handler = &proc_dointvec_minmax, 99 .proc_handler = proc_dointvec_minmax,
109 .strategy = &sysctl_intvec,
110 .extra1 = &xfs_params.syncd_timer.min, 100 .extra1 = &xfs_params.syncd_timer.min,
111 .extra2 = &xfs_params.syncd_timer.max 101 .extra2 = &xfs_params.syncd_timer.max
112 }, 102 },
113 { 103 {
114 .ctl_name = XFS_INHERIT_SYNC,
115 .procname = "inherit_sync", 104 .procname = "inherit_sync",
116 .data = &xfs_params.inherit_sync.val, 105 .data = &xfs_params.inherit_sync.val,
117 .maxlen = sizeof(int), 106 .maxlen = sizeof(int),
118 .mode = 0644, 107 .mode = 0644,
119 .proc_handler = &proc_dointvec_minmax, 108 .proc_handler = proc_dointvec_minmax,
120 .strategy = &sysctl_intvec,
121 .extra1 = &xfs_params.inherit_sync.min, 109 .extra1 = &xfs_params.inherit_sync.min,
122 .extra2 = &xfs_params.inherit_sync.max 110 .extra2 = &xfs_params.inherit_sync.max
123 }, 111 },
124 { 112 {
125 .ctl_name = XFS_INHERIT_NODUMP,
126 .procname = "inherit_nodump", 113 .procname = "inherit_nodump",
127 .data = &xfs_params.inherit_nodump.val, 114 .data = &xfs_params.inherit_nodump.val,
128 .maxlen = sizeof(int), 115 .maxlen = sizeof(int),
129 .mode = 0644, 116 .mode = 0644,
130 .proc_handler = &proc_dointvec_minmax, 117 .proc_handler = proc_dointvec_minmax,
131 .strategy = &sysctl_intvec,
132 .extra1 = &xfs_params.inherit_nodump.min, 118 .extra1 = &xfs_params.inherit_nodump.min,
133 .extra2 = &xfs_params.inherit_nodump.max 119 .extra2 = &xfs_params.inherit_nodump.max
134 }, 120 },
135 { 121 {
136 .ctl_name = XFS_INHERIT_NOATIME,
137 .procname = "inherit_noatime", 122 .procname = "inherit_noatime",
138 .data = &xfs_params.inherit_noatim.val, 123 .data = &xfs_params.inherit_noatim.val,
139 .maxlen = sizeof(int), 124 .maxlen = sizeof(int),
140 .mode = 0644, 125 .mode = 0644,
141 .proc_handler = &proc_dointvec_minmax, 126 .proc_handler = proc_dointvec_minmax,
142 .strategy = &sysctl_intvec,
143 .extra1 = &xfs_params.inherit_noatim.min, 127 .extra1 = &xfs_params.inherit_noatim.min,
144 .extra2 = &xfs_params.inherit_noatim.max 128 .extra2 = &xfs_params.inherit_noatim.max
145 }, 129 },
146 { 130 {
147 .ctl_name = XFS_BUF_TIMER,
148 .procname = "xfsbufd_centisecs", 131 .procname = "xfsbufd_centisecs",
149 .data = &xfs_params.xfs_buf_timer.val, 132 .data = &xfs_params.xfs_buf_timer.val,
150 .maxlen = sizeof(int), 133 .maxlen = sizeof(int),
151 .mode = 0644, 134 .mode = 0644,
152 .proc_handler = &proc_dointvec_minmax, 135 .proc_handler = proc_dointvec_minmax,
153 .strategy = &sysctl_intvec,
154 .extra1 = &xfs_params.xfs_buf_timer.min, 136 .extra1 = &xfs_params.xfs_buf_timer.min,
155 .extra2 = &xfs_params.xfs_buf_timer.max 137 .extra2 = &xfs_params.xfs_buf_timer.max
156 }, 138 },
157 { 139 {
158 .ctl_name = XFS_BUF_AGE,
159 .procname = "age_buffer_centisecs", 140 .procname = "age_buffer_centisecs",
160 .data = &xfs_params.xfs_buf_age.val, 141 .data = &xfs_params.xfs_buf_age.val,
161 .maxlen = sizeof(int), 142 .maxlen = sizeof(int),
162 .mode = 0644, 143 .mode = 0644,
163 .proc_handler = &proc_dointvec_minmax, 144 .proc_handler = proc_dointvec_minmax,
164 .strategy = &sysctl_intvec,
165 .extra1 = &xfs_params.xfs_buf_age.min, 145 .extra1 = &xfs_params.xfs_buf_age.min,
166 .extra2 = &xfs_params.xfs_buf_age.max 146 .extra2 = &xfs_params.xfs_buf_age.max
167 }, 147 },
168 { 148 {
169 .ctl_name = XFS_INHERIT_NOSYM,
170 .procname = "inherit_nosymlinks", 149 .procname = "inherit_nosymlinks",
171 .data = &xfs_params.inherit_nosym.val, 150 .data = &xfs_params.inherit_nosym.val,
172 .maxlen = sizeof(int), 151 .maxlen = sizeof(int),
173 .mode = 0644, 152 .mode = 0644,
174 .proc_handler = &proc_dointvec_minmax, 153 .proc_handler = proc_dointvec_minmax,
175 .strategy = &sysctl_intvec,
176 .extra1 = &xfs_params.inherit_nosym.min, 154 .extra1 = &xfs_params.inherit_nosym.min,
177 .extra2 = &xfs_params.inherit_nosym.max 155 .extra2 = &xfs_params.inherit_nosym.max
178 }, 156 },
179 { 157 {
180 .ctl_name = XFS_ROTORSTEP,
181 .procname = "rotorstep", 158 .procname = "rotorstep",
182 .data = &xfs_params.rotorstep.val, 159 .data = &xfs_params.rotorstep.val,
183 .maxlen = sizeof(int), 160 .maxlen = sizeof(int),
184 .mode = 0644, 161 .mode = 0644,
185 .proc_handler = &proc_dointvec_minmax, 162 .proc_handler = proc_dointvec_minmax,
186 .strategy = &sysctl_intvec,
187 .extra1 = &xfs_params.rotorstep.min, 163 .extra1 = &xfs_params.rotorstep.min,
188 .extra2 = &xfs_params.rotorstep.max 164 .extra2 = &xfs_params.rotorstep.max
189 }, 165 },
190 { 166 {
191 .ctl_name = XFS_INHERIT_NODFRG,
192 .procname = "inherit_nodefrag", 167 .procname = "inherit_nodefrag",
193 .data = &xfs_params.inherit_nodfrg.val, 168 .data = &xfs_params.inherit_nodfrg.val,
194 .maxlen = sizeof(int), 169 .maxlen = sizeof(int),
195 .mode = 0644, 170 .mode = 0644,
196 .proc_handler = &proc_dointvec_minmax, 171 .proc_handler = proc_dointvec_minmax,
197 .strategy = &sysctl_intvec,
198 .extra1 = &xfs_params.inherit_nodfrg.min, 172 .extra1 = &xfs_params.inherit_nodfrg.min,
199 .extra2 = &xfs_params.inherit_nodfrg.max 173 .extra2 = &xfs_params.inherit_nodfrg.max
200 }, 174 },
201 { 175 {
202 .ctl_name = XFS_FILESTREAM_TIMER,
203 .procname = "filestream_centisecs", 176 .procname = "filestream_centisecs",
204 .data = &xfs_params.fstrm_timer.val, 177 .data = &xfs_params.fstrm_timer.val,
205 .maxlen = sizeof(int), 178 .maxlen = sizeof(int),
206 .mode = 0644, 179 .mode = 0644,
207 .proc_handler = &proc_dointvec_minmax, 180 .proc_handler = proc_dointvec_minmax,
208 .strategy = &sysctl_intvec,
209 .extra1 = &xfs_params.fstrm_timer.min, 181 .extra1 = &xfs_params.fstrm_timer.min,
210 .extra2 = &xfs_params.fstrm_timer.max, 182 .extra2 = &xfs_params.fstrm_timer.max,
211 }, 183 },
212 /* please keep this the last entry */ 184 /* please keep this the last entry */
213#ifdef CONFIG_PROC_FS 185#ifdef CONFIG_PROC_FS
214 { 186 {
215 .ctl_name = XFS_STATS_CLEAR,
216 .procname = "stats_clear", 187 .procname = "stats_clear",
217 .data = &xfs_params.stats_clear.val, 188 .data = &xfs_params.stats_clear.val,
218 .maxlen = sizeof(int), 189 .maxlen = sizeof(int),
219 .mode = 0644, 190 .mode = 0644,
220 .proc_handler = &xfs_stats_clear_proc_handler, 191 .proc_handler = xfs_stats_clear_proc_handler,
221 .strategy = &sysctl_intvec,
222 .extra1 = &xfs_params.stats_clear.min, 192 .extra1 = &xfs_params.stats_clear.min,
223 .extra2 = &xfs_params.stats_clear.max 193 .extra2 = &xfs_params.stats_clear.max
224 }, 194 },
@@ -229,7 +199,6 @@ static ctl_table xfs_table[] = {
229 199
230static ctl_table xfs_dir_table[] = { 200static ctl_table xfs_dir_table[] = {
231 { 201 {
232 .ctl_name = FS_XFS,
233 .procname = "xfs", 202 .procname = "xfs",
234 .mode = 0555, 203 .mode = 0555,
235 .child = xfs_table 204 .child = xfs_table
@@ -239,7 +208,6 @@ static ctl_table xfs_dir_table[] = {
239 208
240static ctl_table xfs_root_table[] = { 209static ctl_table xfs_root_table[] = {
241 { 210 {
242 .ctl_name = CTL_FS,
243 .procname = "fs", 211 .procname = "fs",
244 .mode = 0555, 212 .mode = 0555,
245 .child = xfs_dir_table 213 .child = xfs_dir_table
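
The xfs_sysctl.c hunk is mechanical: every entry loses its .ctl_name (binary sysctl number) and .strategy field, and handlers are referenced without the redundant &, leaving procname-only registration. A sketch of an entry in the resulting style; the example_* names are hypothetical:

	static int example_val;

	static ctl_table example_table[] = {
		{
			.procname	= "example_tunable",
			.data		= &example_val,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{}	/* zero-filled terminator; no .ctl_name needed */
	};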
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index 6533ead9b889..a2c16bcee90b 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -98,7 +98,7 @@ typedef struct xfs_dquot {
98#define dq_flags q_lists.dqm_flags 98#define dq_flags q_lists.dqm_flags
99 99
100/* 100/*
101 * Lock hierachy for q_qlock: 101 * Lock hierarchy for q_qlock:
102 * XFS_QLOCK_NORMAL is the implicit default, 102 * XFS_QLOCK_NORMAL is the implicit default,
103 * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2 103 * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
104 */ 104 */