path: root/fs
author  David Woodhouse <dwmw2@infradead.org>  2007-10-13 09:43:54 -0400
committer  David Woodhouse <dwmw2@infradead.org>  2007-10-13 09:43:54 -0400
commit  b160292cc216a50fd0cd386b0bda2cd48352c73b (patch)
tree  ef07cf98f91353ee4c9ec1e1ca7a2a5d9d4b538a /fs
parent  b37bde147890c8fea8369a5a4e230dabdea4ebfb (diff)
parent  bbf25010f1a6b761914430f5fca081ec8c7accd1 (diff)
Merge Linux 2.6.23
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/fid.c | 17
-rw-r--r--  fs/9p/fid.h | 1
-rw-r--r--  fs/Kconfig | 3
-rw-r--r--  fs/afs/mntpt.c | 2
-rw-r--r--  fs/aio.c | 2
-rw-r--r--  fs/binfmt_flat.c | 6
-rw-r--r--  fs/compat_ioctl.c | 24
-rw-r--r--  fs/ecryptfs/inode.c | 4
-rw-r--r--  fs/ecryptfs/mmap.c | 3
-rw-r--r--  fs/exec.c | 3
-rw-r--r--  fs/ext3/namei.c | 73
-rw-r--r--  fs/ext3/super.c | 11
-rw-r--r--  fs/ext4/namei.c | 73
-rw-r--r--  fs/ext4/super.c | 11
-rw-r--r--  fs/hugetlbfs/inode.c | 15
-rw-r--r--  fs/jffs2/fs.c | 2
-rw-r--r--  fs/lockd/svclock.c | 31
-rw-r--r--  fs/locks.c | 2
-rw-r--r--  fs/nfs/client.c | 29
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfs/file.c | 2
-rw-r--r--  fs/nfs/getroot.c | 3
-rw-r--r--  fs/nfs/namespace.c | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 4
-rw-r--r--  fs/nfs/super.c | 132
-rw-r--r--  fs/nfs/write.c | 44
-rw-r--r--  fs/nfsd/nfsfh.c | 20
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/ocfs2/alloc.c | 1
-rw-r--r--  fs/ocfs2/aops.c | 37
-rw-r--r--  fs/ocfs2/file.c | 5
-rw-r--r--  fs/ocfs2/localalloc.c | 8
-rw-r--r--  fs/ocfs2/localalloc.h | 2
-rw-r--r--  fs/ocfs2/suballoc.c | 29
-rw-r--r--  fs/ocfs2/suballoc.h | 11
-rw-r--r--  fs/ocfs2/super.c | 69
-rw-r--r--  fs/ocfs2/vote.c | 4
-rw-r--r--  fs/proc/array.c | 44
-rw-r--r--  fs/proc/inode.c | 3
-rw-r--r--  fs/reiserfs/super.c | 13
-rw-r--r--  fs/select.c | 2
-rw-r--r--  fs/signalfd.c | 190
-rw-r--r--  fs/splice.c | 46
-rw-r--r--  fs/sysfs/bin.c | 7
-rw-r--r--  fs/sysfs/dir.c | 21
-rw-r--r--  fs/udf/balloc.c | 10
-rw-r--r--  fs/udf/super.c | 26
-rw-r--r--  fs/ufs/super.c | 4
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 4
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 3
-rw-r--r--  fs/xfs/support/debug.h | 10
-rw-r--r--  fs/xfs/xfs_da_btree.c | 1
-rw-r--r--  fs/xfs/xfs_filestream.c | 10
-rw-r--r--  fs/xfs/xfs_log.c | 12
-rw-r--r--  fs/xfs/xfs_log_recover.c | 12
-rw-r--r--  fs/xfs/xfs_mru_cache.c | 72
-rw-r--r--  fs/xfs/xfs_mru_cache.h | 6
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 20
61 files changed, 698 insertions, 521 deletions
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 08fa320b7e6d..15e05a15b575 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -92,23 +92,6 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
         return fid;
 }
 
-struct p9_fid *v9fs_fid_lookup_remove(struct dentry *dentry)
-{
-        struct p9_fid *fid;
-        struct v9fs_dentry *dent;
-
-        dent = dentry->d_fsdata;
-        fid = v9fs_fid_lookup(dentry);
-        if (!IS_ERR(fid)) {
-                spin_lock(&dent->lock);
-                list_del(&fid->dlist);
-                spin_unlock(&dent->lock);
-        }
-
-        return fid;
-}
-
-
 /**
  * v9fs_fid_clone - lookup the fid for a dentry, clone a private copy and
  * release it
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 47a0ba742872..26e07df783b9 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -28,6 +28,5 @@ struct v9fs_dentry {
 };
 
 struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
-struct p9_fid *v9fs_fid_lookup_remove(struct dentry *dentry);
 struct p9_fid *v9fs_fid_clone(struct dentry *dentry);
 int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
diff --git a/fs/Kconfig b/fs/Kconfig
index 84fb8428c023..bb02b39380a3 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -441,9 +441,6 @@ config OCFS2_FS
 
           Note: Features which OCFS2 does not support yet:
                   - extended attributes
-                  - shared writeable mmap
-                  - loopback is supported, but data written will not
-                    be cluster coherent.
                   - quotas
                   - cluster aware flock
                   - Directory change notification (F_NOTIFY)
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index a3684dcc76e7..6f8c96fb29eb 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -235,8 +235,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
         err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
         switch (err) {
         case 0:
-                mntput(nd->mnt);
                 dput(nd->dentry);
+                mntput(nd->mnt);
                 nd->mnt = newmnt;
                 nd->dentry = dget(newmnt->mnt_root);
                 schedule_delayed_work(&afs_mntpt_expiry_timer,
diff --git a/fs/aio.c b/fs/aio.c
index dbe699e9828c..ea2e19820381 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1562,6 +1562,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                 fput(file);
                 return -EAGAIN;
         }
+        req->ki_filp = file;
         if (iocb->aio_flags & IOCB_FLAG_RESFD) {
                 /*
                  * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
@@ -1576,7 +1577,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                 }
         }
 
-        req->ki_filp = file;
         ret = put_user(req->ki_key, &user_iocb->aio_key);
         if (unlikely(ret)) {
                 dprintk("EFAULT: aio_key\n");
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 861141b4f6d6..fcb3405bb14e 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -742,6 +742,7 @@ static int load_flat_file(struct linux_binprm * bprm,
          * __start to address 4 so that is okay).
          */
         if (rev > OLD_FLAT_VERSION) {
+                unsigned long persistent = 0;
                 for (i=0; i < relocs; i++) {
                         unsigned long addr, relval;
 
@@ -749,6 +750,8 @@ static int load_flat_file(struct linux_binprm * bprm,
                            relocated (of course, the address has to be
                            relocated first). */
                         relval = ntohl(reloc[i]);
+                        if (flat_set_persistent (relval, &persistent))
+                                continue;
                         addr = flat_get_relocate_addr(relval);
                         rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
                         if (rp == (unsigned long *)RELOC_FAILED) {
@@ -757,7 +760,8 @@ static int load_flat_file(struct linux_binprm * bprm,
                         }
 
                         /* Get the pointer's value. */
-                        addr = flat_get_addr_from_rp(rp, relval, flags);
+                        addr = flat_get_addr_from_rp(rp, relval, flags,
+                                                        &persistent);
                         if (addr != 0) {
                                 /*
                                  * Do the relocation. PIC relocs in the data section are
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a6c9078af124..37310b0e8107 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -2311,8 +2311,10 @@ static int do_wireless_ioctl(unsigned int fd, unsigned int cmd, unsigned long ar
         struct iwreq __user *iwr_u;
         struct iw_point __user *iwp;
         struct compat_iw_point __user *iwp_u;
-        compat_caddr_t pointer;
+        compat_caddr_t pointer_u;
+        void __user *pointer;
         __u16 length, flags;
+        int ret;
 
         iwr_u = compat_ptr(arg);
         iwp_u = (struct compat_iw_point __user *) &iwr_u->u.data;
@@ -2330,17 +2332,29 @@ static int do_wireless_ioctl(unsigned int fd, unsigned int cmd, unsigned long ar
                     sizeof(iwr->ifr_ifrn.ifrn_name)))
                 return -EFAULT;
 
-        if (__get_user(pointer, &iwp_u->pointer) ||
+        if (__get_user(pointer_u, &iwp_u->pointer) ||
             __get_user(length, &iwp_u->length) ||
             __get_user(flags, &iwp_u->flags))
                 return -EFAULT;
 
-        if (__put_user(compat_ptr(pointer), &iwp->pointer) ||
+        if (__put_user(compat_ptr(pointer_u), &iwp->pointer) ||
             __put_user(length, &iwp->length) ||
             __put_user(flags, &iwp->flags))
                 return -EFAULT;
 
-        return sys_ioctl(fd, cmd, (unsigned long) iwr);
+        ret = sys_ioctl(fd, cmd, (unsigned long) iwr);
+
+        if (__get_user(pointer, &iwp->pointer) ||
+            __get_user(length, &iwp->length) ||
+            __get_user(flags, &iwp->flags))
+                return -EFAULT;
+
+        if (__put_user(ptr_to_compat(pointer), &iwp_u->pointer) ||
+            __put_user(length, &iwp_u->length) ||
+            __put_user(flags, &iwp_u->flags))
+                return -EFAULT;
+
+        return ret;
 }
 
 /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE
@@ -3176,6 +3190,8 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY)
 COMPATIBLE_IOCTL(SIOCGIWRETRY)
 COMPATIBLE_IOCTL(SIOCSIWPOWER)
 COMPATIBLE_IOCTL(SIOCGIWPOWER)
+COMPATIBLE_IOCTL(SIOCSIWAUTH)
+COMPATIBLE_IOCTL(SIOCGIWAUTH)
 /* hiddev */
 COMPATIBLE_IOCTL(HIDIOCGVERSION)
 COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 5d40ad13ab5c..131954b3fb98 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -357,10 +357,6 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
                 ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
                 goto out;
         }
-        if (special_file(lower_inode->i_mode)) {
-                ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
-                goto out;
-        }
         if (!nd) {
                 ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave"
                                 "as we *think* we are about to unlink\n");
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index e4ab7bc14efe..fd3f94d4a668 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -834,7 +834,8 @@ static void ecryptfs_sync_page(struct page *page)
                 ecryptfs_printk(KERN_DEBUG, "find_lock_page failed\n");
                 return;
         }
-        lower_page->mapping->a_ops->sync_page(lower_page);
+        if (lower_page->mapping->a_ops->sync_page)
+                lower_page->mapping->a_ops->sync_page(lower_page);
         ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
                         lower_page->index);
         unlock_page(lower_page);
diff --git a/fs/exec.c b/fs/exec.c
index c21a8cc06277..073b0b8c6d05 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -50,7 +50,6 @@
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
 #include <linux/audit.h>
-#include <linux/signalfd.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -784,7 +783,6 @@ static int de_thread(struct task_struct *tsk)
          * and we can just re-use it all.
          */
         if (atomic_read(&oldsighand->count) <= 1) {
-                signalfd_detach(tsk);
                 exit_itimers(sig);
                 return 0;
         }
@@ -923,7 +921,6 @@ static int de_thread(struct task_struct *tsk)
         sig->flags = 0;
 
 no_thread_group:
-        signalfd_detach(tsk);
         exit_itimers(sig);
         if (leader)
                 release_task(leader);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 1586807b8177..c1fa1908dba0 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -140,7 +140,8 @@ struct dx_frame
 struct dx_map_entry
 {
         u32 hash;
-        u32 offs;
+        u16 offs;
+        u16 size;
 };
 
 #ifdef CONFIG_EXT3_INDEX
@@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir,
 
         entries = (struct dx_entry *) (((char *)&root->info) +
                                        root->info.info_length);
-        assert(dx_get_limit(entries) == dx_root_limit(dir,
-                                                      root->info.info_length));
+
+        if (dx_get_limit(entries) != dx_root_limit(dir,
+                                                   root->info.info_length)) {
+                ext3_warning(dir->i_sb, __FUNCTION__,
+                             "dx entry: limit != root limit");
+                brelse(bh);
+                *err = ERR_BAD_DX_DIR;
+                goto fail;
+        }
+
         dxtrace (printk("Look up %x", hash));
         while (1)
         {
                 count = dx_get_count(entries);
-                assert (count && count <= dx_get_limit(entries));
+                if (!count || count > dx_get_limit(entries)) {
+                        ext3_warning(dir->i_sb, __FUNCTION__,
+                                     "dx entry: no count or count > limit");
+                        brelse(bh);
+                        *err = ERR_BAD_DX_DIR;
+                        goto fail2;
+                }
+
                 p = entries + 1;
                 q = entries + count - 1;
                 while (p <= q)
@@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir,
                 if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
                         goto fail2;
                 at = entries = ((struct dx_node *) bh->b_data)->entries;
-                assert (dx_get_limit(entries) == dx_node_limit (dir));
+                if (dx_get_limit(entries) != dx_node_limit (dir)) {
+                        ext3_warning(dir->i_sb, __FUNCTION__,
+                                     "dx entry: limit != node limit");
+                        brelse(bh);
+                        *err = ERR_BAD_DX_DIR;
+                        goto fail2;
+                }
                 frame++;
+                frame->bh = NULL;
         }
 fail2:
         while (frame >= frame_in) {
@@ -432,6 +455,10 @@ fail2:
                 frame--;
         }
 fail:
+        if (*err == ERR_BAD_DX_DIR)
+                ext3_warning(dir->i_sb, __FUNCTION__,
+                             "Corrupt dir inode %ld, running e2fsck is "
+                             "recommended.", dir->i_ino);
         return NULL;
 }
 
@@ -671,6 +698,10 @@ errout:
  * Directory block splitting, compacting
  */
 
+/*
+ * Create map of hash values, offsets, and sizes, stored at end of block.
+ * Returns number of entries mapped.
+ */
 static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
                         struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
 {
@@ -684,7 +715,8 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
                         ext3fs_dirhash(de->name, de->name_len, &h);
                         map_tail--;
                         map_tail->hash = h.hash;
-                        map_tail->offs = (u32) ((char *) de - base);
+                        map_tail->offs = (u16) ((char *) de - base);
+                        map_tail->size = le16_to_cpu(de->rec_len);
                         count++;
                         cond_resched();
                 }
@@ -694,6 +726,7 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
         return count;
 }
 
+/* Sort map by hash value */
 static void dx_sort_map (struct dx_map_entry *map, unsigned count)
 {
         struct dx_map_entry *p, *q, *top = map + count - 1;
@@ -1091,6 +1124,10 @@ static inline void ext3_set_de_type(struct super_block *sb,
 }
 
 #ifdef CONFIG_EXT3_INDEX
+/*
+ * Move count entries from end of map between two memory locations.
+ * Returns pointer to last entry moved.
+ */
 static struct ext3_dir_entry_2 *
 dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
 {
@@ -1109,6 +1146,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
         return (struct ext3_dir_entry_2 *) (to - rec_len);
 }
 
+/*
+ * Compact each dir entry in the range to the minimal rec_len.
+ * Returns pointer to last entry in range.
+ */
 static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
 {
         struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base;
@@ -1131,6 +1172,11 @@ static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
         return prev;
 }
 
+/*
+ * Split a full leaf block to make room for a new dir entry.
+ * Allocate a new block, and move entries so that they are approx. equally full.
+ * Returns pointer to de in block into which the new entry will be inserted.
+ */
 static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                         struct buffer_head **bh,struct dx_frame *frame,
                         struct dx_hash_info *hinfo, int *error)
@@ -1142,7 +1188,7 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
         u32 hash2;
         struct dx_map_entry *map;
         char *data1 = (*bh)->b_data, *data2;
-        unsigned split;
+        unsigned split, move, size, i;
         struct ext3_dir_entry_2 *de = NULL, *de2;
         int err = 0;
 
@@ -1170,8 +1216,19 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
         count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
                              blocksize, hinfo, map);
         map -= count;
-        split = count/2; // need to adjust to actual middle
         dx_sort_map (map, count);
+        /* Split the existing block in the middle, size-wise */
+        size = 0;
+        move = 0;
+        for (i = count-1; i >= 0; i--) {
+                /* is more than half of this entry in 2nd half of the block? */
+                if (size + map[i].size/2 > blocksize/2)
+                        break;
+                size += map[i].size;
+                move++;
+        }
+        /* map index at which we will split */
+        split = count - move;
         hash2 = map[split].hash;
         continued = hash2 == map[split - 1].hash;
         dxtrace(printk("Split block %i at %x, %i/%i\n",
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 22cfdd61c060..9537316a0714 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2578,8 +2578,11 @@ static int ext3_release_dquot(struct dquot *dquot)
 
         handle = ext3_journal_start(dquot_to_inode(dquot),
                                 EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb));
-        if (IS_ERR(handle))
+        if (IS_ERR(handle)) {
+                /* Release dquot anyway to avoid endless cycle in dqput() */
+                dquot_release(dquot);
                 return PTR_ERR(handle);
+        }
         ret = dquot_release(dquot);
         err = ext3_journal_stop(handle);
         if (!ret)
@@ -2712,6 +2715,12 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
         struct buffer_head *bh;
         handle_t *handle = journal_current_handle();
 
+        if (!handle) {
+                printk(KERN_WARNING "EXT3-fs: Quota write (off=%Lu, len=%Lu)"
+                        " cancelled because transaction is not started.\n",
+                        (unsigned long long)off, (unsigned long long)len);
+                return -EIO;
+        }
         mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
         while (towrite > 0) {
                 tocopy = sb->s_blocksize - offset < towrite ?
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index da224974af78..5fdb862e71c4 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -140,7 +140,8 @@ struct dx_frame
 struct dx_map_entry
 {
         u32 hash;
-        u32 offs;
+        u16 offs;
+        u16 size;
 };
 
 #ifdef CONFIG_EXT4_INDEX
@@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir,
 
         entries = (struct dx_entry *) (((char *)&root->info) +
                                        root->info.info_length);
-        assert(dx_get_limit(entries) == dx_root_limit(dir,
-                                                      root->info.info_length));
+
+        if (dx_get_limit(entries) != dx_root_limit(dir,
+                                                   root->info.info_length)) {
+                ext4_warning(dir->i_sb, __FUNCTION__,
+                             "dx entry: limit != root limit");
+                brelse(bh);
+                *err = ERR_BAD_DX_DIR;
+                goto fail;
+        }
+
         dxtrace (printk("Look up %x", hash));
         while (1)
         {
                 count = dx_get_count(entries);
-                assert (count && count <= dx_get_limit(entries));
+                if (!count || count > dx_get_limit(entries)) {
+                        ext4_warning(dir->i_sb, __FUNCTION__,
+                                     "dx entry: no count or count > limit");
+                        brelse(bh);
+                        *err = ERR_BAD_DX_DIR;
+                        goto fail2;
+                }
+
                 p = entries + 1;
                 q = entries + count - 1;
                 while (p <= q)
@@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir,
                 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
                         goto fail2;
                 at = entries = ((struct dx_node *) bh->b_data)->entries;
-                assert (dx_get_limit(entries) == dx_node_limit (dir));
+                if (dx_get_limit(entries) != dx_node_limit (dir)) {
+                        ext4_warning(dir->i_sb, __FUNCTION__,
+                                     "dx entry: limit != node limit");
+                        brelse(bh);
+                        *err = ERR_BAD_DX_DIR;
+                        goto fail2;
+                }
                 frame++;
+                frame->bh = NULL;
         }
 fail2:
         while (frame >= frame_in) {
@@ -432,6 +455,10 @@ fail2:
                 frame--;
         }
 fail:
+        if (*err == ERR_BAD_DX_DIR)
+                ext4_warning(dir->i_sb, __FUNCTION__,
+                             "Corrupt dir inode %ld, running e2fsck is "
+                             "recommended.", dir->i_ino);
         return NULL;
 }
 
@@ -671,6 +698,10 @@ errout:
  * Directory block splitting, compacting
  */
 
+/*
+ * Create map of hash values, offsets, and sizes, stored at end of block.
+ * Returns number of entries mapped.
+ */
 static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
                         struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
 {
@@ -684,7 +715,8 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
                         ext4fs_dirhash(de->name, de->name_len, &h);
                         map_tail--;
                         map_tail->hash = h.hash;
-                        map_tail->offs = (u32) ((char *) de - base);
+                        map_tail->offs = (u16) ((char *) de - base);
+                        map_tail->size = le16_to_cpu(de->rec_len);
                         count++;
                         cond_resched();
                 }
@@ -694,6 +726,7 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
         return count;
 }
 
+/* Sort map by hash value */
 static void dx_sort_map (struct dx_map_entry *map, unsigned count)
 {
         struct dx_map_entry *p, *q, *top = map + count - 1;
@@ -1089,6 +1122,10 @@ static inline void ext4_set_de_type(struct super_block *sb,
 }
 
 #ifdef CONFIG_EXT4_INDEX
+/*
+ * Move count entries from end of map between two memory locations.
+ * Returns pointer to last entry moved.
+ */
 static struct ext4_dir_entry_2 *
 dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
 {
@@ -1107,6 +1144,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
         return (struct ext4_dir_entry_2 *) (to - rec_len);
 }
 
+/*
+ * Compact each dir entry in the range to the minimal rec_len.
+ * Returns pointer to last entry in range.
+ */
 static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
 {
         struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
@@ -1129,6 +1170,11 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
         return prev;
 }
 
+/*
+ * Split a full leaf block to make room for a new dir entry.
+ * Allocate a new block, and move entries so that they are approx. equally full.
+ * Returns pointer to de in block into which the new entry will be inserted.
+ */
 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                         struct buffer_head **bh,struct dx_frame *frame,
                         struct dx_hash_info *hinfo, int *error)
@@ -1140,7 +1186,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
         u32 hash2;
         struct dx_map_entry *map;
         char *data1 = (*bh)->b_data, *data2;
-        unsigned split;
+        unsigned split, move, size, i;
         struct ext4_dir_entry_2 *de = NULL, *de2;
         int err = 0;
 
@@ -1168,8 +1214,19 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
         count = dx_make_map ((struct ext4_dir_entry_2 *) data1,
                              blocksize, hinfo, map);
         map -= count;
-        split = count/2; // need to adjust to actual middle
         dx_sort_map (map, count);
+        /* Split the existing block in the middle, size-wise */
+        size = 0;
+        move = 0;
+        for (i = count-1; i >= 0; i--) {
+                /* is more than half of this entry in 2nd half of the block? */
+                if (size + map[i].size/2 > blocksize/2)
+                        break;
+                size += map[i].size;
+                move++;
+        }
+        /* map index at which we will split */
+        split = count - move;
         hash2 = map[split].hash;
         continued = hash2 == map[split - 1].hash;
         dxtrace(printk("Split block %i at %x, %i/%i\n",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4550b83ab1c9..3c1397fa83df 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2698,8 +2698,11 @@ static int ext4_release_dquot(struct dquot *dquot)
 
         handle = ext4_journal_start(dquot_to_inode(dquot),
                                 EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
-        if (IS_ERR(handle))
+        if (IS_ERR(handle)) {
+                /* Release dquot anyway to avoid endless cycle in dqput() */
+                dquot_release(dquot);
                 return PTR_ERR(handle);
+        }
         ret = dquot_release(dquot);
         err = ext4_journal_stop(handle);
         if (!ret)
@@ -2832,6 +2835,12 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
         struct buffer_head *bh;
         handle_t *handle = journal_current_handle();
 
+        if (!handle) {
+                printk(KERN_WARNING "EXT4-fs: Quota write (off=%Lu, len=%Lu)"
+                        " cancelled because transaction is not started.\n",
+                        (unsigned long long)off, (unsigned long long)len);
+                return -EIO;
+        }
         mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
         while (towrite > 0) {
                 tocopy = sb->s_blocksize - offset < towrite ?
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c848a191525d..950c2fbb815b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
         int ret;
 
         /*
-         * vma alignment has already been checked by prepare_hugepage_range.
-         * If you add any error returns here, do so after setting VM_HUGETLB,
-         * so is_vm_hugetlb_page tests below unmap_region go the right way
-         * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+         * vma address alignment (but not the pgoff alignment) has
+         * already been checked by prepare_hugepage_range.  If you add
+         * any error returns here, do so after setting VM_HUGETLB, so
+         * is_vm_hugetlb_page tests below unmap_region go the right
+         * way when do_mmap_pgoff unwinds (may be important on powerpc
+         * and ia64).
          */
         vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
         vma->vm_ops = &hugetlb_vm_ops;
 
+        if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+                return -EINVAL;
+
         vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
         mutex_lock(&inode->i_mutex);
@@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                 return -ENOMEM;
 
         if (flags & MAP_FIXED) {
-                if (prepare_hugepage_range(addr, len, pgoff))
+                if (prepare_hugepage_range(addr, len))
                         return -EINVAL;
                 return addr;
         }
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index dd64ddc11d43..ed85f9afdbc8 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -647,7 +647,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
         struct inode *inode = OFNI_EDONI_2SFFJ(f);
         struct page *pg;
 
-        pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+        pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
                              (void *)jffs2_do_readpage_unlock, inode);
         if (IS_ERR(pg))
                 return (void *)pg;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index a21e4bc5444b..d120ec39bcb0 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -171,19 +171,14 @@ found:
  * GRANTED_RES message by cookie, without having to rely on the client's IP
  * address. --okir
  */
-static inline struct nlm_block *
-nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
-                    struct nlm_lock *lock, struct nlm_cookie *cookie)
+static struct nlm_block *
+nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
+                    struct nlm_file *file, struct nlm_lock *lock,
+                    struct nlm_cookie *cookie)
 {
         struct nlm_block *block;
-        struct nlm_host *host;
         struct nlm_rqst *call = NULL;
 
-        /* Create host handle for callback */
-        host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
-        if (host == NULL)
-                return NULL;
-
         call = nlm_alloc_call(host);
         if (call == NULL)
                 return NULL;
@@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
             struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
 {
         struct nlm_block *block = NULL;
+        struct nlm_host *host;
         int error;
         __be32 ret;
 
@@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
                                 (long long)lock->fl.fl_end,
                                 wait);
 
+        /* Create host handle for callback */
+        host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
+        if (host == NULL)
+                return nlm_lck_denied_nolocks;
 
         /* Lock file against concurrent access */
         mutex_lock(&file->f_mutex);
@@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
          */
         block = nlmsvc_lookup_block(file, lock);
         if (block == NULL) {
-                block = nlmsvc_create_block(rqstp, file, lock, cookie);
+                block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
+                                lock, cookie);
                 ret = nlm_lck_denied_nolocks;
                 if (block == NULL)
                         goto out;
@@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
 out:
         mutex_unlock(&file->f_mutex);
         nlmsvc_release_block(block);
+        nlm_release_host(host);
         dprintk("lockd: nlmsvc_lock returned %u\n", ret);
         return ret;
 }
@@ -477,10 +479,17 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
 
         if (block == NULL) {
                 struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+                struct nlm_host *host;
 
                 if (conf == NULL)
                         return nlm_granted;
-                block = nlmsvc_create_block(rqstp, file, lock, cookie);
+                /* Create host handle for callback */
+                host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
+                if (host == NULL) {
+                        kfree(conf);
+                        return nlm_lck_denied_nolocks;
+                }
+                block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
                 if (block == NULL) {
                         kfree(conf);
                         return nlm_granted;
diff --git a/fs/locks.c b/fs/locks.c
index 50857d2d3404..c795eaaf6c4c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -782,7 +782,7 @@ find_conflict:
         if (request->fl_flags & FL_ACCESS)
                 goto out;
         locks_copy_lock(new_fl, request);
-        locks_insert_lock(&inode->i_flock, new_fl);
+        locks_insert_lock(before, new_fl);
         new_fl = NULL;
         error = 0;
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a49f9feff776..a204484072f3 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -588,16 +588,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat
         server->namelen = data->namlen;
         /* Create a client RPC handle for the NFSv3 ACL management interface */
         nfs_init_server_aclclient(server);
-        if (clp->cl_nfsversion == 3) {
-                if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
-                        server->namelen = NFS3_MAXNAMLEN;
-                if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
-                        server->caps |= NFS_CAP_READDIRPLUS;
-        } else {
-                if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
-                        server->namelen = NFS2_MAXNAMLEN;
-        }
-
         dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
         return 0;
 
@@ -794,6 +784,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data,
         error = nfs_probe_fsinfo(server, mntfh, &fattr);
         if (error < 0)
                 goto error;
+        if (server->nfs_client->rpc_ops->version == 3) {
+                if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
+                        server->namelen = NFS3_MAXNAMLEN;
+                if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+                        server->caps |= NFS_CAP_READDIRPLUS;
+        } else {
+                if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
+                        server->namelen = NFS2_MAXNAMLEN;
+        }
+
         if (!(fattr.valid & NFS_ATTR_FATTR)) {
                 error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
                 if (error < 0) {
@@ -984,6 +984,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data,
         if (error < 0)
                 goto error;
 
+        if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+                server->namelen = NFS4_MAXNAMLEN;
+
         BUG_ON(!server->nfs_client);
         BUG_ON(!server->nfs_client->rpc_ops);
         BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
@@ -1056,6 +1059,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
         if (error < 0)
                 goto error;
 
+        if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+                server->namelen = NFS4_MAXNAMLEN;
+
         dprintk("Referral FSID: %llx:%llx\n",
                 (unsigned long long) server->fsid.major,
                 (unsigned long long) server->fsid.minor);
@@ -1115,6 +1121,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
         if (error < 0)
                 goto out_free_server;
 
+        if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+                server->namelen = NFS4_MAXNAMLEN;
+
         dprintk("Cloned FSID: %llx:%llx\n",
                 (unsigned long long) server->fsid.major,
                 (unsigned long long) server->fsid.minor);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ea97408e423e..e4a04d16b8b0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1162,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
         }
         if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
                 return NULL;
+        if (name.len > NFS_SERVER(dir)->namelen)
+                return NULL;
         /* Note: caller is already holding the dir->i_mutex! */
         dentry = d_alloc(parent, &name);
         if (dentry == NULL)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index c87dc713b5d7..579cf8a7d4a7 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -316,7 +316,7 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
         if (offset != 0)
                 return;
         /* Cancel any unstarted writes on this page */
-        nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
+        nfs_wb_page_cancel(page->mapping->host, page);
 }
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index d1cbf0a0fbb2..522e5ad4d8ad 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -175,6 +175,9 @@ next_component:
                 path++;
         name.len = path - (const char *) name.name;
 
+        if (name.len > NFS4_MAXNAMLEN)
+                return -ENAMETOOLONG;
+
 eat_dot_dir:
         while (*path == '/')
                 path++;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index aea76d0e5fbd..acfc56f9edc0 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -176,7 +176,7 @@ static void nfs_expire_automounts(struct work_struct *work)
 void nfs_release_automount_timer(void)
 {
         if (list_empty(&nfs_automount_list))
-                cancel_delayed_work_sync(&nfs_automount_task);
+                cancel_delayed_work(&nfs_automount_task);
 }
 
 /*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 62b3ae280310..4b90e17555a9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -646,7 +646,7 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
         rcu_read_lock();
         delegation = rcu_dereference(NFS_I(state->inode)->delegation);
         if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0)
-                delegation_type = delegation->flags;
+                delegation_type = delegation->type;
         rcu_read_unlock();
         opendata->o_arg.u.delegation_type = delegation_type;
         status = nfs4_open_recover(opendata, state);
@@ -1434,7 +1434,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
         }
         res = d_add_unique(dentry, igrab(state->inode));
         if (res != NULL)
-                dentry = res;
+                path.dentry = res;
         nfs4_intent_set_file(nd, &path, state);
         return res;
 }
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index b2a851c1b8cb..b878528b64c1 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -345,8 +345,8 @@ void __exit unregister_nfs_fs(void)
         unregister_shrinker(&acl_shrinker);
 #ifdef CONFIG_NFS_V4
         unregister_filesystem(&nfs4_fs_type);
-        nfs_unregister_sysctl();
 #endif
+        nfs_unregister_sysctl();
         unregister_filesystem(&nfs_fs_type);
 }
 
@@ -911,13 +911,13 @@ static int nfs_parse_mount_options(char *raw,
                         kfree(string);
 
                         switch (token) {
-                        case Opt_udp:
+                        case Opt_xprt_udp:
                                 mnt->flags &= ~NFS_MOUNT_TCP;
                                 mnt->nfs_server.protocol = IPPROTO_UDP;
                                 mnt->timeo = 7;
                                 mnt->retrans = 5;
                                 break;
-                        case Opt_tcp:
+                        case Opt_xprt_tcp:
                                 mnt->flags |= NFS_MOUNT_TCP;
                                 mnt->nfs_server.protocol = IPPROTO_TCP;
                                 mnt->timeo = 600;
@@ -936,10 +936,10 @@ static int nfs_parse_mount_options(char *raw,
                         kfree(string);
 
                         switch (token) {
-                        case Opt_udp:
+                        case Opt_xprt_udp:
                                 mnt->mount_server.protocol = IPPROTO_UDP;
                                 break;
-                        case Opt_tcp:
+                        case Opt_xprt_tcp:
                                 mnt->mount_server.protocol = IPPROTO_TCP;
                                 break;
                         default:
@@ -1153,20 +1153,20 @@ static int nfs_validate_mount_data(struct nfs_mount_data **options,
         c = strchr(dev_name, ':');
         if (c == NULL)
                 return -EINVAL;
-        len = c - dev_name - 1;
+        len = c - dev_name;
         if (len > sizeof(data->hostname))
-                return -EINVAL;
+                return -ENAMETOOLONG;
         strncpy(data->hostname, dev_name, len);
         args.nfs_server.hostname = data->hostname;
 
         c++;
         if (strlen(c) > NFS_MAXPATHLEN)
-                return -EINVAL;
+                return -ENAMETOOLONG;
         args.nfs_server.export_path = c;
 
         status = nfs_try_mount(&args, mntfh);
         if (status)
-                return -EINVAL;
+                return status;
 
         /*
          * Translate to nfs_mount_data, which nfs_fill_super
@@ -1303,34 +1303,6 @@ static void nfs_clone_super(struct super_block *sb,
         nfs_initialise_sb(sb);
 }
 
-static int nfs_set_super(struct super_block *s, void *_server)
-{
-        struct nfs_server *server = _server;
-        int ret;
-
-        s->s_fs_info = server;
-        ret = set_anon_super(s, server);
-        if (ret == 0)
-                server->s_dev = s->s_dev;
-        return ret;
-}
-
-static int nfs_compare_super(struct super_block *sb, void *data)
-{
-        struct nfs_server *server = data, *old = NFS_SB(sb);
-
-        if (memcmp(&old->nfs_client->cl_addr,
-                        &server->nfs_client->cl_addr,
-                        sizeof(old->nfs_client->cl_addr)) != 0)
-                return 0;
-        /* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */
-        if (old->flags & NFS_MOUNT_UNSHARED)
-                return 0;
-        if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0)
-                return 0;
-        return 1;
-}
-
 #define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
 
 static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
@@ -1359,9 +1331,46 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
                 goto Ebusy;
         if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
                 goto Ebusy;
-        return 0;
+        return 1;
 Ebusy:
-        return -EBUSY;
+        return 0;
+}
+
+struct nfs_sb_mountdata {
+        struct nfs_server *server;
+        int mntflags;
+};
+
+static int nfs_set_super(struct super_block *s, void *data)
+{
+        struct nfs_sb_mountdata *sb_mntdata = data;
+        struct nfs_server *server = sb_mntdata->server;
+        int ret;
+
+        s->s_flags = sb_mntdata->mntflags;
+        s->s_fs_info = server;
+        ret = set_anon_super(s, server);
+        if (ret == 0)
+                server->s_dev = s->s_dev;
+        return ret;
+}
+
+static int nfs_compare_super(struct super_block *sb, void *data)
+{
+        struct nfs_sb_mountdata *sb_mntdata = data;
+        struct nfs_server *server = sb_mntdata->server, *old = NFS_SB(sb);
+        int mntflags = sb_mntdata->mntflags;
+
+        if (memcmp(&old->nfs_client->cl_addr,
+                        &server->nfs_client->cl_addr,
+                        sizeof(old->nfs_client->cl_addr)) != 0)
+                return 0;
+        /* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */
+        if (old->flags & NFS_MOUNT_UNSHARED)
+                return 0;
+        if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0)
+                return 0;
+        return nfs_compare_mount_options(sb, server, mntflags);
 }
 
 static int nfs_get_sb(struct file_system_type *fs_type,
@@ -1373,6 +1382,9 @@ static int nfs_get_sb(struct file_system_type *fs_type,
         struct nfs_mount_data *data = raw_data;
         struct dentry *mntroot;
         int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+        struct nfs_sb_mountdata sb_mntdata = {
+                .mntflags = flags,
+        };
         int error;
 
         /* Validate the mount data */
@@ -1386,28 +1398,25 @@ static int nfs_get_sb(struct file_system_type *fs_type,
                 error = PTR_ERR(server);
                 goto out;
         }
+        sb_mntdata.server = server;
 
         if (server->flags & NFS_MOUNT_UNSHARED)
                 compare_super = NULL;
 
         /* Get a superblock - note that we may end up sharing one that already exists */
-        s = sget(fs_type, compare_super, nfs_set_super, server);
+        s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
         if (IS_ERR(s)) {
                 error = PTR_ERR(s);
                 goto out_err_nosb;
         }
 
         if (s->s_fs_info != server) {
-                error = nfs_compare_mount_options(s, server, flags);
                 nfs_free_server(server);
                 server = NULL;
-                if (error < 0)
-                        goto error_splat_super;
         }
 
         if (!s->s_root) {
                 /* initial superblock/root creation */
-                s->s_flags = flags;
                 nfs_fill_super(s, data);
         }
 
@@ -1460,6 +1469,9 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
         struct nfs_server *server;
         struct dentry *mntroot;
         int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+        struct nfs_sb_mountdata sb_mntdata = {
+                .mntflags = flags,
+        };
         int error;
 
         dprintk("--> nfs_xdev_get_sb()\n");
@@ -1470,28 +1482,25 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
                 error = PTR_ERR(server);
                 goto out_err_noserver;
         }
+        sb_mntdata.server = server;
 
         if (server->flags & NFS_MOUNT_UNSHARED)
                 compare_super = NULL;
 
         /* Get a superblock - note that we may end up sharing one that already exists */
-        s = sget(&nfs_fs_type, compare_super, nfs_set_super, server);
+        s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
         if (IS_ERR(s)) {
                 error = PTR_ERR(s);
                 goto out_err_nosb;
         }
 
         if (s->s_fs_info != server) {
-                error = nfs_compare_mount_options(s, server, flags);
                 nfs_free_server(server);
                 server = NULL;
-                if (error < 0)
-                        goto error_splat_super;
         }
 
         if (!s->s_root) {
                 /* initial superblock/root creation */
-                s->s_flags = flags;
                 nfs_clone_super(s, data->sb);
         }
 
@@ -1668,7 +1677,7 @@ static int nfs4_validate_mount_data(struct nfs4_mount_data **options,
         /* while calculating len, pretend ':' is '\0' */
         len = c - dev_name;
         if (len > NFS4_MAXNAMLEN)
-                return -EINVAL;
+                return -ENAMETOOLONG;
         *hostname = kzalloc(len, GFP_KERNEL);
         if (*hostname == NULL)
                 return -ENOMEM;
@@ -1677,7 +1686,7 @@ static int nfs4_validate_mount_data(struct nfs4_mount_data **options,
         c++;                        /* step over the ':' */
         len = strlen(c);
         if (len > NFS4_MAXPATHLEN)
-                return -EINVAL;
+                return -ENAMETOOLONG;
         *mntpath = kzalloc(len + 1, GFP_KERNEL);
         if (*mntpath == NULL)
                 return -ENOMEM;
@@ -1729,6 +1738,9 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
         struct dentry *mntroot;
         char *mntpath = NULL, *hostname = NULL, *ip_addr = NULL;
         int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+        struct nfs_sb_mountdata sb_mntdata = {
+                .mntflags = flags,
+        };
         int error;
 
         /* Validate the mount data */
@@ -1744,12 +1756,13 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
1744 error = PTR_ERR(server); 1756 error = PTR_ERR(server);
1745 goto out; 1757 goto out;
1746 } 1758 }
1759 sb_mntdata.server = server;
1747 1760
1748 if (server->flags & NFS4_MOUNT_UNSHARED) 1761 if (server->flags & NFS4_MOUNT_UNSHARED)
1749 compare_super = NULL; 1762 compare_super = NULL;
1750 1763
1751 /* Get a superblock - note that we may end up sharing one that already exists */ 1764 /* Get a superblock - note that we may end up sharing one that already exists */
1752 s = sget(fs_type, compare_super, nfs_set_super, server); 1765 s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
1753 if (IS_ERR(s)) { 1766 if (IS_ERR(s)) {
1754 error = PTR_ERR(s); 1767 error = PTR_ERR(s);
1755 goto out_free; 1768 goto out_free;
@@ -1762,7 +1775,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
1762 1775
1763 if (!s->s_root) { 1776 if (!s->s_root) {
1764 /* initial superblock/root creation */ 1777 /* initial superblock/root creation */
1765 s->s_flags = flags;
1766 nfs4_fill_super(s); 1778 nfs4_fill_super(s);
1767 } 1779 }
1768 1780
@@ -1816,6 +1828,9 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
1816 struct nfs_server *server; 1828 struct nfs_server *server;
1817 struct dentry *mntroot; 1829 struct dentry *mntroot;
1818 int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 1830 int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
1831 struct nfs_sb_mountdata sb_mntdata = {
1832 .mntflags = flags,
1833 };
1819 int error; 1834 int error;
1820 1835
1821 dprintk("--> nfs4_xdev_get_sb()\n"); 1836 dprintk("--> nfs4_xdev_get_sb()\n");
@@ -1826,12 +1841,13 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
1826 error = PTR_ERR(server); 1841 error = PTR_ERR(server);
1827 goto out_err_noserver; 1842 goto out_err_noserver;
1828 } 1843 }
1844 sb_mntdata.server = server;
1829 1845
1830 if (server->flags & NFS4_MOUNT_UNSHARED) 1846 if (server->flags & NFS4_MOUNT_UNSHARED)
1831 compare_super = NULL; 1847 compare_super = NULL;
1832 1848
1833 /* Get a superblock - note that we may end up sharing one that already exists */ 1849 /* Get a superblock - note that we may end up sharing one that already exists */
1834 s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); 1850 s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
1835 if (IS_ERR(s)) { 1851 if (IS_ERR(s)) {
1836 error = PTR_ERR(s); 1852 error = PTR_ERR(s);
1837 goto out_err_nosb; 1853 goto out_err_nosb;
@@ -1844,7 +1860,6 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
1844 1860
1845 if (!s->s_root) { 1861 if (!s->s_root) {
1846 /* initial superblock/root creation */ 1862 /* initial superblock/root creation */
1847 s->s_flags = flags;
1848 nfs4_clone_super(s, data->sb); 1863 nfs4_clone_super(s, data->sb);
1849 } 1864 }
1850 1865
@@ -1887,6 +1902,9 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
1887 struct dentry *mntroot; 1902 struct dentry *mntroot;
1888 struct nfs_fh mntfh; 1903 struct nfs_fh mntfh;
1889 int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 1904 int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
1905 struct nfs_sb_mountdata sb_mntdata = {
1906 .mntflags = flags,
1907 };
1890 int error; 1908 int error;
1891 1909
1892 dprintk("--> nfs4_referral_get_sb()\n"); 1910 dprintk("--> nfs4_referral_get_sb()\n");
@@ -1897,12 +1915,13 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
1897 error = PTR_ERR(server); 1915 error = PTR_ERR(server);
1898 goto out_err_noserver; 1916 goto out_err_noserver;
1899 } 1917 }
1918 sb_mntdata.server = server;
1900 1919
1901 if (server->flags & NFS4_MOUNT_UNSHARED) 1920 if (server->flags & NFS4_MOUNT_UNSHARED)
1902 compare_super = NULL; 1921 compare_super = NULL;
1903 1922
1904 /* Get a superblock - note that we may end up sharing one that already exists */ 1923 /* Get a superblock - note that we may end up sharing one that already exists */
1905 s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); 1924 s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
1906 if (IS_ERR(s)) { 1925 if (IS_ERR(s)) {
1907 error = PTR_ERR(s); 1926 error = PTR_ERR(s);
1908 goto out_err_nosb; 1927 goto out_err_nosb;
@@ -1915,7 +1934,6 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
1915 1934
1916 if (!s->s_root) { 1935 if (!s->s_root) {
1917 /* initial superblock/root creation */ 1936 /* initial superblock/root creation */
1918 s->s_flags = flags;
1919 nfs4_fill_super(s); 1937 nfs4_fill_super(s);
1920 } 1938 }
1921 1939
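The hunks above stop assigning the mount flags to s_flags inside each get_sb variant; instead sget() is handed a struct nfs_sb_mountdata that bundles the flags with the nfs_server, so the set-super callback can apply them before the new superblock is compared against or shared with existing ones. The callback and comparison changes live in an earlier hunk of fs/nfs/super.c outside this excerpt; the sketch below only illustrates the assumed shape of that callback, it is not the verbatim patch:

struct nfs_sb_mountdata {
	struct nfs_server *server;
	int mntflags;
};

static int nfs_set_super(struct super_block *s, void *data)
{
	struct nfs_sb_mountdata *sb_mntdata = data;
	struct nfs_server *server = sb_mntdata->server;
	int ret;

	/* Apply the caller's mount flags before the sb can be shared. */
	s->s_flags = sb_mntdata->mntflags;
	s->s_fs_info = server;
	ret = set_anon_super(s, server);	/* assumed, as in the old callback */
	if (ret == 0)
		server->s_dev = s->s_dev;
	return ret;
}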
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ef97e0c0f5b1..0d7a77cc394b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1396,6 +1396,50 @@ out:
1396 return ret; 1396 return ret;
1397} 1397}
1398 1398
1399int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1400{
1401 struct nfs_page *req;
1402 loff_t range_start = page_offset(page);
1403 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1404 struct writeback_control wbc = {
1405 .bdi = page->mapping->backing_dev_info,
1406 .sync_mode = WB_SYNC_ALL,
1407 .nr_to_write = LONG_MAX,
1408 .range_start = range_start,
1409 .range_end = range_end,
1410 };
1411 int ret = 0;
1412
1413 BUG_ON(!PageLocked(page));
1414 for (;;) {
1415 req = nfs_page_find_request(page);
1416 if (req == NULL)
1417 goto out;
1418 if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1419 nfs_release_request(req);
1420 break;
1421 }
1422 if (nfs_lock_request_dontget(req)) {
1423 nfs_inode_remove_request(req);
1424 /*
1425 * In case nfs_inode_remove_request has marked the
1426 * page as being dirty
1427 */
1428 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1429 nfs_unlock_request(req);
1430 break;
1431 }
1432 ret = nfs_wait_on_request(req);
1433 if (ret < 0)
1434 goto out;
1435 }
1436 if (!PagePrivate(page))
1437 return 0;
1438 ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
1439out:
1440 return ret;
1441}
1442
1399int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) 1443int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
1400{ 1444{
1401 loff_t range_start = page_offset(page); 1445 loff_t range_start = page_offset(page);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 0eb464a39aae..7011d62acfc8 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -566,13 +566,23 @@ enum fsid_source fsid_source(struct svc_fh *fhp)
566 case FSID_DEV: 566 case FSID_DEV:
567 case FSID_ENCODE_DEV: 567 case FSID_ENCODE_DEV:
568 case FSID_MAJOR_MINOR: 568 case FSID_MAJOR_MINOR:
569 return FSIDSOURCE_DEV; 569 if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags
570 & FS_REQUIRES_DEV)
571 return FSIDSOURCE_DEV;
572 break;
570 case FSID_NUM: 573 case FSID_NUM:
571 return FSIDSOURCE_FSID;
572 default:
573 if (fhp->fh_export->ex_flags & NFSEXP_FSID) 574 if (fhp->fh_export->ex_flags & NFSEXP_FSID)
574 return FSIDSOURCE_FSID; 575 return FSIDSOURCE_FSID;
575 else 576 break;
576 return FSIDSOURCE_UUID; 577 default:
578 break;
577 } 579 }
580 /* either a UUID type filehandle, or the filehandle doesn't
581 * match the export.
582 */
583 if (fhp->fh_export->ex_flags & NFSEXP_FSID)
584 return FSIDSOURCE_FSID;
585 if (fhp->fh_export->ex_uuid)
586 return FSIDSOURCE_UUID;
587 return FSIDSOURCE_DEV;
578} 588}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index a0c2b253818b..7867151ebb83 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -115,7 +115,8 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
115 115
116 exp2 = rqst_exp_get_by_name(rqstp, mnt, mounts); 116 exp2 = rqst_exp_get_by_name(rqstp, mnt, mounts);
117 if (IS_ERR(exp2)) { 117 if (IS_ERR(exp2)) {
118 err = PTR_ERR(exp2); 118 if (PTR_ERR(exp2) != -ENOENT)
119 err = PTR_ERR(exp2);
119 dput(mounts); 120 dput(mounts);
120 mntput(mnt); 121 mntput(mnt);
121 goto out; 122 goto out;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 4f517665c9a0..778a850b4634 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5602,6 +5602,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
5602 clusters_to_del; 5602 clusters_to_del;
5603 spin_unlock(&OCFS2_I(inode)->ip_lock); 5603 spin_unlock(&OCFS2_I(inode)->ip_lock);
5604 le32_add_cpu(&fe->i_clusters, -clusters_to_del); 5604 le32_add_cpu(&fe->i_clusters, -clusters_to_del);
5605 inode->i_blocks = ocfs2_inode_sector_count(inode);
5605 5606
5606 status = ocfs2_trim_tree(inode, path, handle, tc, 5607 status = ocfs2_trim_tree(inode, path, handle, tc,
5607 clusters_to_del, &delete_blk); 5608 clusters_to_del, &delete_blk);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 460d440310f2..f37f25c931f5 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -855,6 +855,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
855 struct ocfs2_super *osb, loff_t pos, 855 struct ocfs2_super *osb, loff_t pos,
856 unsigned len, struct buffer_head *di_bh) 856 unsigned len, struct buffer_head *di_bh)
857{ 857{
858 u32 cend;
858 struct ocfs2_write_ctxt *wc; 859 struct ocfs2_write_ctxt *wc;
859 860
860 wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS); 861 wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
@@ -862,7 +863,8 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
862 return -ENOMEM; 863 return -ENOMEM;
863 864
864 wc->w_cpos = pos >> osb->s_clustersize_bits; 865 wc->w_cpos = pos >> osb->s_clustersize_bits;
865 wc->w_clen = ocfs2_clusters_for_bytes(osb->sb, len); 866 cend = (pos + len - 1) >> osb->s_clustersize_bits;
867 wc->w_clen = cend - wc->w_cpos + 1;
866 get_bh(di_bh); 868 get_bh(di_bh);
867 wc->w_di_bh = di_bh; 869 wc->w_di_bh = di_bh;
868 870
@@ -928,18 +930,11 @@ static void ocfs2_write_failure(struct inode *inode,
928 loff_t user_pos, unsigned user_len) 930 loff_t user_pos, unsigned user_len)
929{ 931{
930 int i; 932 int i;
931 unsigned from, to; 933 unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
934 to = user_pos + user_len;
932 struct page *tmppage; 935 struct page *tmppage;
933 936
934 ocfs2_zero_new_buffers(wc->w_target_page, user_pos, user_len); 937 ocfs2_zero_new_buffers(wc->w_target_page, from, to);
935
936 if (wc->w_large_pages) {
937 from = wc->w_target_from;
938 to = wc->w_target_to;
939 } else {
940 from = 0;
941 to = PAGE_CACHE_SIZE;
942 }
943 938
944 for(i = 0; i < wc->w_num_pages; i++) { 939 for(i = 0; i < wc->w_num_pages; i++) {
945 tmppage = wc->w_pages[i]; 940 tmppage = wc->w_pages[i];
@@ -989,9 +984,6 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
989 map_from = cluster_start; 984 map_from = cluster_start;
990 map_to = cluster_end; 985 map_to = cluster_end;
991 } 986 }
992
993 wc->w_target_from = map_from;
994 wc->w_target_to = map_to;
995 } else { 987 } else {
996 /* 988 /*
997 * If we haven't allocated the new page yet, we 989 * If we haven't allocated the new page yet, we
@@ -1209,18 +1201,33 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1209 loff_t pos, unsigned len) 1201 loff_t pos, unsigned len)
1210{ 1202{
1211 int ret, i; 1203 int ret, i;
1204 loff_t cluster_off;
1205 unsigned int local_len = len;
1212 struct ocfs2_write_cluster_desc *desc; 1206 struct ocfs2_write_cluster_desc *desc;
1207 struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
1213 1208
1214 for (i = 0; i < wc->w_clen; i++) { 1209 for (i = 0; i < wc->w_clen; i++) {
1215 desc = &wc->w_desc[i]; 1210 desc = &wc->w_desc[i];
1216 1211
1212 /*
1213 * We have to make sure that the total write passed in
1214 * doesn't extend past a single cluster.
1215 */
1216 local_len = len;
1217 cluster_off = pos & (osb->s_clustersize - 1);
1218 if ((cluster_off + local_len) > osb->s_clustersize)
1219 local_len = osb->s_clustersize - cluster_off;
1220
1217 ret = ocfs2_write_cluster(mapping, desc->c_phys, 1221 ret = ocfs2_write_cluster(mapping, desc->c_phys,
1218 desc->c_unwritten, data_ac, meta_ac, 1222 desc->c_unwritten, data_ac, meta_ac,
1219 wc, desc->c_cpos, pos, len); 1223 wc, desc->c_cpos, pos, local_len);
1220 if (ret) { 1224 if (ret) {
1221 mlog_errno(ret); 1225 mlog_errno(ret);
1222 goto out; 1226 goto out;
1223 } 1227 }
1228
1229 len -= local_len;
1230 pos += local_len;
1224 } 1231 }
1225 1232
1226 ret = 0; 1233 ret = 0;
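The loop added to ocfs2_write_cluster_by_desc() above clips each pass so that a single call to ocfs2_write_cluster() never spans a cluster boundary: local_len is cut back to the end of the cluster containing pos, and pos/len are advanced by whatever was handed down. A small stand-alone illustration of that clipping arithmetic (plain userspace C with a made-up 32 KB cluster size; only the arithmetic mirrors the hunk, nothing here is ocfs2 code):

#include <stdio.h>

int main(void)
{
	unsigned long clustersize = 32768;	/* example cluster size (32 KB) */
	unsigned long long pos = 28672;		/* write starts 28 KB into the first cluster */
	unsigned long len = 12288;		/* 12 KB write crosses one cluster boundary */

	while (len) {
		unsigned long local_len = len;
		unsigned long long cluster_off = pos & (clustersize - 1);

		/* Same clipping as the hunk: never let one pass cross a cluster. */
		if (cluster_off + local_len > clustersize)
			local_len = clustersize - cluster_off;

		printf("write %lu bytes at offset %llu\n", local_len, pos);
		len -= local_len;
		pos += local_len;
	}
	return 0;	/* prints: 4096 bytes at 28672, then 8192 bytes at 32768 */
}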
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 4ffa715be09c..f3bc3658e7a5 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -314,7 +314,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
314 } 314 }
315 315
316 i_size_write(inode, new_i_size); 316 i_size_write(inode, new_i_size);
317 inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
318 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 317 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
319 318
320 di = (struct ocfs2_dinode *) fe_bh->b_data; 319 di = (struct ocfs2_dinode *) fe_bh->b_data;
@@ -492,8 +491,8 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
492 goto leave; 491 goto leave;
493 } 492 }
494 493
495 status = ocfs2_claim_clusters(osb, handle, data_ac, 1, 494 status = __ocfs2_claim_clusters(osb, handle, data_ac, 1,
496 &bit_off, &num_bits); 495 clusters_to_add, &bit_off, &num_bits);
497 if (status < 0) { 496 if (status < 0) {
498 if (status != -ENOSPC) 497 if (status != -ENOSPC)
499 mlog_errno(status); 498 mlog_errno(status);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 545f7892cdf3..d272847d5a07 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -514,8 +514,10 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
514 ac->ac_bh = osb->local_alloc_bh; 514 ac->ac_bh = osb->local_alloc_bh;
515 status = 0; 515 status = 0;
516bail: 516bail:
517 if (status < 0 && local_alloc_inode) 517 if (status < 0 && local_alloc_inode) {
518 mutex_unlock(&local_alloc_inode->i_mutex);
518 iput(local_alloc_inode); 519 iput(local_alloc_inode);
520 }
519 521
520 mlog_exit(status); 522 mlog_exit(status);
521 return status; 523 return status;
@@ -524,13 +526,12 @@ bail:
524int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, 526int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
525 handle_t *handle, 527 handle_t *handle,
526 struct ocfs2_alloc_context *ac, 528 struct ocfs2_alloc_context *ac,
527 u32 min_bits, 529 u32 bits_wanted,
528 u32 *bit_off, 530 u32 *bit_off,
529 u32 *num_bits) 531 u32 *num_bits)
530{ 532{
531 int status, start; 533 int status, start;
532 struct inode *local_alloc_inode; 534 struct inode *local_alloc_inode;
533 u32 bits_wanted;
534 void *bitmap; 535 void *bitmap;
535 struct ocfs2_dinode *alloc; 536 struct ocfs2_dinode *alloc;
536 struct ocfs2_local_alloc *la; 537 struct ocfs2_local_alloc *la;
@@ -538,7 +539,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
538 mlog_entry_void(); 539 mlog_entry_void();
539 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); 540 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
540 541
541 bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
542 local_alloc_inode = ac->ac_inode; 542 local_alloc_inode = ac->ac_inode;
543 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; 543 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
544 la = OCFS2_LOCAL_ALLOC(alloc); 544 la = OCFS2_LOCAL_ALLOC(alloc);
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index 385a10152f9c..3f76631e110c 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -48,7 +48,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
48int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, 48int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
49 handle_t *handle, 49 handle_t *handle,
50 struct ocfs2_alloc_context *ac, 50 struct ocfs2_alloc_context *ac,
51 u32 min_bits, 51 u32 bits_wanted,
52 u32 *bit_off, 52 u32 *bit_off,
53 u32 *num_bits); 53 u32 *num_bits);
54 54
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index d9c5c9fcb30f..8f09f5235e3a 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1486,21 +1486,21 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode,
1486 * contig. allocation, set to '1' to indicate we can deal with extents 1486 * contig. allocation, set to '1' to indicate we can deal with extents
1487 * of any size. 1487 * of any size.
1488 */ 1488 */
1489int ocfs2_claim_clusters(struct ocfs2_super *osb, 1489int __ocfs2_claim_clusters(struct ocfs2_super *osb,
1490 handle_t *handle, 1490 handle_t *handle,
1491 struct ocfs2_alloc_context *ac, 1491 struct ocfs2_alloc_context *ac,
1492 u32 min_clusters, 1492 u32 min_clusters,
1493 u32 *cluster_start, 1493 u32 max_clusters,
1494 u32 *num_clusters) 1494 u32 *cluster_start,
1495 u32 *num_clusters)
1495{ 1496{
1496 int status; 1497 int status;
1497 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; 1498 unsigned int bits_wanted = max_clusters;
1498 u64 bg_blkno = 0; 1499 u64 bg_blkno = 0;
1499 u16 bg_bit_off; 1500 u16 bg_bit_off;
1500 1501
1501 mlog_entry_void(); 1502 mlog_entry_void();
1502 1503
1503 BUG_ON(!ac);
1504 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); 1504 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
1505 1505
1506 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL 1506 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
@@ -1557,6 +1557,19 @@ bail:
1557 return status; 1557 return status;
1558} 1558}
1559 1559
1560int ocfs2_claim_clusters(struct ocfs2_super *osb,
1561 handle_t *handle,
1562 struct ocfs2_alloc_context *ac,
1563 u32 min_clusters,
1564 u32 *cluster_start,
1565 u32 *num_clusters)
1566{
1567 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
1568
1569 return __ocfs2_claim_clusters(osb, handle, ac, min_clusters,
1570 bits_wanted, cluster_start, num_clusters);
1571}
1572
1560static inline int ocfs2_block_group_clear_bits(handle_t *handle, 1573static inline int ocfs2_block_group_clear_bits(handle_t *handle,
1561 struct inode *alloc_inode, 1574 struct inode *alloc_inode,
1562 struct ocfs2_group_desc *bg, 1575 struct ocfs2_group_desc *bg,
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index f212dc01a84b..cafe93703095 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -85,6 +85,17 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
85 u32 min_clusters, 85 u32 min_clusters,
86 u32 *cluster_start, 86 u32 *cluster_start,
87 u32 *num_clusters); 87 u32 *num_clusters);
88/*
89 * Use this variant of ocfs2_claim_clusters to specify a maximum

90 * number of clusters smaller than the allocation reserved.
91 */
92int __ocfs2_claim_clusters(struct ocfs2_super *osb,
93 handle_t *handle,
94 struct ocfs2_alloc_context *ac,
95 u32 min_clusters,
96 u32 max_clusters,
97 u32 *cluster_start,
98 u32 *num_clusters);
88 99
89int ocfs2_free_suballoc_bits(handle_t *handle, 100int ocfs2_free_suballoc_bits(handle_t *handle,
90 struct inode *alloc_inode, 101 struct inode *alloc_inode,
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index f2fc9a795deb..c034b5129c1e 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -81,8 +81,15 @@ static struct dentry *ocfs2_debugfs_root = NULL;
81MODULE_AUTHOR("Oracle"); 81MODULE_AUTHOR("Oracle");
82MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
83 83
84struct mount_options
85{
86 unsigned long mount_opt;
87 unsigned int atime_quantum;
88 signed short slot;
89};
90
84static int ocfs2_parse_options(struct super_block *sb, char *options, 91static int ocfs2_parse_options(struct super_block *sb, char *options,
85 unsigned long *mount_opt, s16 *slot, 92 struct mount_options *mopt,
86 int is_remount); 93 int is_remount);
87static void ocfs2_put_super(struct super_block *sb); 94static void ocfs2_put_super(struct super_block *sb);
88static int ocfs2_mount_volume(struct super_block *sb); 95static int ocfs2_mount_volume(struct super_block *sb);
@@ -367,24 +374,23 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
367{ 374{
368 int incompat_features; 375 int incompat_features;
369 int ret = 0; 376 int ret = 0;
370 unsigned long parsed_options; 377 struct mount_options parsed_options;
371 s16 slot;
372 struct ocfs2_super *osb = OCFS2_SB(sb); 378 struct ocfs2_super *osb = OCFS2_SB(sb);
373 379
374 if (!ocfs2_parse_options(sb, data, &parsed_options, &slot, 1)) { 380 if (!ocfs2_parse_options(sb, data, &parsed_options, 1)) {
375 ret = -EINVAL; 381 ret = -EINVAL;
376 goto out; 382 goto out;
377 } 383 }
378 384
379 if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) != 385 if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) !=
380 (parsed_options & OCFS2_MOUNT_HB_LOCAL)) { 386 (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
381 ret = -EINVAL; 387 ret = -EINVAL;
382 mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n"); 388 mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");
383 goto out; 389 goto out;
384 } 390 }
385 391
386 if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) != 392 if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) !=
387 (parsed_options & OCFS2_MOUNT_DATA_WRITEBACK)) { 393 (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) {
388 ret = -EINVAL; 394 ret = -EINVAL;
389 mlog(ML_ERROR, "Cannot change data mode on remount\n"); 395 mlog(ML_ERROR, "Cannot change data mode on remount\n");
390 goto out; 396 goto out;
@@ -435,7 +441,9 @@ unlock_osb:
435 441
436 /* Only save off the new mount options in case of a successful 442 /* Only save off the new mount options in case of a successful
437 * remount. */ 443 * remount. */
438 osb->s_mount_opt = parsed_options; 444 osb->s_mount_opt = parsed_options.mount_opt;
445 osb->s_atime_quantum = parsed_options.atime_quantum;
446 osb->preferred_slot = parsed_options.slot;
439 } 447 }
440out: 448out:
441 return ret; 449 return ret;
@@ -547,8 +555,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
547{ 555{
548 struct dentry *root; 556 struct dentry *root;
549 int status, sector_size; 557 int status, sector_size;
550 unsigned long parsed_opt; 558 struct mount_options parsed_options;
551 s16 slot;
552 struct inode *inode = NULL; 559 struct inode *inode = NULL;
553 struct ocfs2_super *osb = NULL; 560 struct ocfs2_super *osb = NULL;
554 struct buffer_head *bh = NULL; 561 struct buffer_head *bh = NULL;
@@ -556,14 +563,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
556 563
557 mlog_entry("%p, %p, %i", sb, data, silent); 564 mlog_entry("%p, %p, %i", sb, data, silent);
558 565
559 if (!ocfs2_parse_options(sb, data, &parsed_opt, &slot, 0)) { 566 if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
560 status = -EINVAL; 567 status = -EINVAL;
561 goto read_super_error; 568 goto read_super_error;
562 } 569 }
563 570
564 /* for now we only have one cluster/node, make sure we see it 571 /* for now we only have one cluster/node, make sure we see it
565 * in the heartbeat universe */ 572 * in the heartbeat universe */
566 if (parsed_opt & OCFS2_MOUNT_HB_LOCAL) { 573 if (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL) {
567 if (!o2hb_check_local_node_heartbeating()) { 574 if (!o2hb_check_local_node_heartbeating()) {
568 status = -EINVAL; 575 status = -EINVAL;
569 goto read_super_error; 576 goto read_super_error;
@@ -585,8 +592,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
585 } 592 }
586 brelse(bh); 593 brelse(bh);
587 bh = NULL; 594 bh = NULL;
588 osb->s_mount_opt = parsed_opt; 595 osb->s_mount_opt = parsed_options.mount_opt;
589 osb->preferred_slot = slot; 596 osb->s_atime_quantum = parsed_options.atime_quantum;
597 osb->preferred_slot = parsed_options.slot;
590 598
591 sb->s_magic = OCFS2_SUPER_MAGIC; 599 sb->s_magic = OCFS2_SUPER_MAGIC;
592 600
@@ -728,8 +736,7 @@ static struct file_system_type ocfs2_fs_type = {
728 736
729static int ocfs2_parse_options(struct super_block *sb, 737static int ocfs2_parse_options(struct super_block *sb,
730 char *options, 738 char *options,
731 unsigned long *mount_opt, 739 struct mount_options *mopt,
732 s16 *slot,
733 int is_remount) 740 int is_remount)
734{ 741{
735 int status; 742 int status;
@@ -738,8 +745,9 @@ static int ocfs2_parse_options(struct super_block *sb,
738 mlog_entry("remount: %d, options: \"%s\"\n", is_remount, 745 mlog_entry("remount: %d, options: \"%s\"\n", is_remount,
739 options ? options : "(none)"); 746 options ? options : "(none)");
740 747
741 *mount_opt = 0; 748 mopt->mount_opt = 0;
742 *slot = OCFS2_INVALID_SLOT; 749 mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
750 mopt->slot = OCFS2_INVALID_SLOT;
743 751
744 if (!options) { 752 if (!options) {
745 status = 1; 753 status = 1;
@@ -749,7 +757,6 @@ static int ocfs2_parse_options(struct super_block *sb,
749 while ((p = strsep(&options, ",")) != NULL) { 757 while ((p = strsep(&options, ",")) != NULL) {
750 int token, option; 758 int token, option;
751 substring_t args[MAX_OPT_ARGS]; 759 substring_t args[MAX_OPT_ARGS];
752 struct ocfs2_super * osb = OCFS2_SB(sb);
753 760
754 if (!*p) 761 if (!*p)
755 continue; 762 continue;
@@ -757,10 +764,10 @@ static int ocfs2_parse_options(struct super_block *sb,
757 token = match_token(p, tokens, args); 764 token = match_token(p, tokens, args);
758 switch (token) { 765 switch (token) {
759 case Opt_hb_local: 766 case Opt_hb_local:
760 *mount_opt |= OCFS2_MOUNT_HB_LOCAL; 767 mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL;
761 break; 768 break;
762 case Opt_hb_none: 769 case Opt_hb_none:
763 *mount_opt &= ~OCFS2_MOUNT_HB_LOCAL; 770 mopt->mount_opt &= ~OCFS2_MOUNT_HB_LOCAL;
764 break; 771 break;
765 case Opt_barrier: 772 case Opt_barrier:
766 if (match_int(&args[0], &option)) { 773 if (match_int(&args[0], &option)) {
@@ -768,27 +775,27 @@ static int ocfs2_parse_options(struct super_block *sb,
768 goto bail; 775 goto bail;
769 } 776 }
770 if (option) 777 if (option)
771 *mount_opt |= OCFS2_MOUNT_BARRIER; 778 mopt->mount_opt |= OCFS2_MOUNT_BARRIER;
772 else 779 else
773 *mount_opt &= ~OCFS2_MOUNT_BARRIER; 780 mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER;
774 break; 781 break;
775 case Opt_intr: 782 case Opt_intr:
776 *mount_opt &= ~OCFS2_MOUNT_NOINTR; 783 mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR;
777 break; 784 break;
778 case Opt_nointr: 785 case Opt_nointr:
779 *mount_opt |= OCFS2_MOUNT_NOINTR; 786 mopt->mount_opt |= OCFS2_MOUNT_NOINTR;
780 break; 787 break;
781 case Opt_err_panic: 788 case Opt_err_panic:
782 *mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; 789 mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
783 break; 790 break;
784 case Opt_err_ro: 791 case Opt_err_ro:
785 *mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; 792 mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
786 break; 793 break;
787 case Opt_data_ordered: 794 case Opt_data_ordered:
788 *mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; 795 mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
789 break; 796 break;
790 case Opt_data_writeback: 797 case Opt_data_writeback:
791 *mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK; 798 mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
792 break; 799 break;
793 case Opt_atime_quantum: 800 case Opt_atime_quantum:
794 if (match_int(&args[0], &option)) { 801 if (match_int(&args[0], &option)) {
@@ -796,9 +803,7 @@ static int ocfs2_parse_options(struct super_block *sb,
796 goto bail; 803 goto bail;
797 } 804 }
798 if (option >= 0) 805 if (option >= 0)
799 osb->s_atime_quantum = option; 806 mopt->atime_quantum = option;
800 else
801 osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
802 break; 807 break;
803 case Opt_slot: 808 case Opt_slot:
804 option = 0; 809 option = 0;
@@ -807,7 +812,7 @@ static int ocfs2_parse_options(struct super_block *sb,
807 goto bail; 812 goto bail;
808 } 813 }
809 if (option) 814 if (option)
810 *slot = (s16)option; 815 mopt->slot = (s16)option;
811 break; 816 break;
812 default: 817 default:
813 mlog(ML_ERROR, 818 mlog(ML_ERROR,
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 66a13ee63d4c..c05358538f2b 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -66,7 +66,7 @@ struct ocfs2_vote_msg
66{ 66{
67 struct ocfs2_msg_hdr v_hdr; 67 struct ocfs2_msg_hdr v_hdr;
68 __be32 v_reserved1; 68 __be32 v_reserved1;
69}; 69} __attribute__ ((packed));
70 70
71/* Responses are given these values to maintain backwards 71/* Responses are given these values to maintain backwards
72 * compatibility with older ocfs2 versions */ 72 * compatibility with older ocfs2 versions */
@@ -78,7 +78,7 @@ struct ocfs2_response_msg
78{ 78{
79 struct ocfs2_msg_hdr r_hdr; 79 struct ocfs2_msg_hdr r_hdr;
80 __be32 r_response; 80 __be32 r_response;
81}; 81} __attribute__ ((packed));
82 82
83struct ocfs2_vote_work { 83struct ocfs2_vote_work {
84 struct list_head w_list; 84 struct list_head w_list;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 965625a0977d..ee4814dd98f9 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -320,7 +320,21 @@ int proc_pid_status(struct task_struct *task, char *buffer)
320 return buffer - orig; 320 return buffer - orig;
321} 321}
322 322
323static clock_t task_utime(struct task_struct *p) 323/*
324 * Use precise platform statistics if available:
325 */
326#ifdef CONFIG_VIRT_CPU_ACCOUNTING
327static cputime_t task_utime(struct task_struct *p)
328{
329 return p->utime;
330}
331
332static cputime_t task_stime(struct task_struct *p)
333{
334 return p->stime;
335}
336#else
337static cputime_t task_utime(struct task_struct *p)
324{ 338{
325 clock_t utime = cputime_to_clock_t(p->utime), 339 clock_t utime = cputime_to_clock_t(p->utime),
326 total = utime + cputime_to_clock_t(p->stime); 340 total = utime + cputime_to_clock_t(p->stime);
@@ -337,10 +351,10 @@ static clock_t task_utime(struct task_struct *p)
337 } 351 }
338 utime = (clock_t)temp; 352 utime = (clock_t)temp;
339 353
340 return utime; 354 return clock_t_to_cputime(utime);
341} 355}
342 356
343static clock_t task_stime(struct task_struct *p) 357static cputime_t task_stime(struct task_struct *p)
344{ 358{
345 clock_t stime; 359 clock_t stime;
346 360
@@ -349,10 +363,12 @@ static clock_t task_stime(struct task_struct *p)
349 * the total, to make sure the total observed by userspace 363 * the total, to make sure the total observed by userspace
350 * grows monotonically - apps rely on that): 364 * grows monotonically - apps rely on that):
351 */ 365 */
352 stime = nsec_to_clock_t(p->se.sum_exec_runtime) - task_utime(p); 366 stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
367 cputime_to_clock_t(task_utime(p));
353 368
354 return stime; 369 return clock_t_to_cputime(stime);
355} 370}
371#endif
356 372
357static int do_task_stat(struct task_struct *task, char *buffer, int whole) 373static int do_task_stat(struct task_struct *task, char *buffer, int whole)
358{ 374{
@@ -368,8 +384,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
368 unsigned long long start_time; 384 unsigned long long start_time;
369 unsigned long cmin_flt = 0, cmaj_flt = 0; 385 unsigned long cmin_flt = 0, cmaj_flt = 0;
370 unsigned long min_flt = 0, maj_flt = 0; 386 unsigned long min_flt = 0, maj_flt = 0;
371 cputime_t cutime, cstime; 387 cputime_t cutime, cstime, utime, stime;
372 clock_t utime, stime;
373 unsigned long rsslim = 0; 388 unsigned long rsslim = 0;
374 char tcomm[sizeof(task->comm)]; 389 char tcomm[sizeof(task->comm)];
375 unsigned long flags; 390 unsigned long flags;
@@ -387,8 +402,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
387 402
388 sigemptyset(&sigign); 403 sigemptyset(&sigign);
389 sigemptyset(&sigcatch); 404 sigemptyset(&sigcatch);
390 cutime = cstime = cputime_zero; 405 cutime = cstime = utime = stime = cputime_zero;
391 utime = stime = 0;
392 406
393 rcu_read_lock(); 407 rcu_read_lock();
394 if (lock_task_sighand(task, &flags)) { 408 if (lock_task_sighand(task, &flags)) {
@@ -414,15 +428,15 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
414 do { 428 do {
415 min_flt += t->min_flt; 429 min_flt += t->min_flt;
416 maj_flt += t->maj_flt; 430 maj_flt += t->maj_flt;
417 utime += task_utime(t); 431 utime = cputime_add(utime, task_utime(t));
418 stime += task_stime(t); 432 stime = cputime_add(stime, task_stime(t));
419 t = next_thread(t); 433 t = next_thread(t);
420 } while (t != task); 434 } while (t != task);
421 435
422 min_flt += sig->min_flt; 436 min_flt += sig->min_flt;
423 maj_flt += sig->maj_flt; 437 maj_flt += sig->maj_flt;
424 utime += cputime_to_clock_t(sig->utime); 438 utime = cputime_add(utime, sig->utime);
425 stime += cputime_to_clock_t(sig->stime); 439 stime = cputime_add(stime, sig->stime);
426 } 440 }
427 441
428 sid = signal_session(sig); 442 sid = signal_session(sig);
@@ -471,8 +485,8 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
471 cmin_flt, 485 cmin_flt,
472 maj_flt, 486 maj_flt,
473 cmaj_flt, 487 cmaj_flt,
474 utime, 488 cputime_to_clock_t(utime),
475 stime, 489 cputime_to_clock_t(stime),
476 cputime_to_clock_t(cutime), 490 cputime_to_clock_t(cutime),
477 cputime_to_clock_t(cstime), 491 cputime_to_clock_t(cstime),
478 priority, 492 priority,
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index a5b0dfd89a17..0e4d37c93eea 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -11,6 +11,7 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/stat.h> 12#include <linux/stat.h>
13#include <linux/completion.h> 13#include <linux/completion.h>
14#include <linux/poll.h>
14#include <linux/file.h> 15#include <linux/file.h>
15#include <linux/limits.h> 16#include <linux/limits.h>
16#include <linux/init.h> 17#include <linux/init.h>
@@ -232,7 +233,7 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
232static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts) 233static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
233{ 234{
234 struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); 235 struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
235 unsigned int rv = 0; 236 unsigned int rv = DEFAULT_POLLMASK;
236 unsigned int (*poll)(struct file *, struct poll_table_struct *); 237 unsigned int (*poll)(struct file *, struct poll_table_struct *);
237 238
238 spin_lock(&pde->pde_unload_lock); 239 spin_lock(&pde->pde_unload_lock);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 5b68dd3f191a..a005451930b7 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1915,8 +1915,11 @@ static int reiserfs_release_dquot(struct dquot *dquot)
1915 ret = 1915 ret =
1916 journal_begin(&th, dquot->dq_sb, 1916 journal_begin(&th, dquot->dq_sb,
1917 REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); 1917 REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
1918 if (ret) 1918 if (ret) {
1919 /* Release dquot anyway to avoid endless cycle in dqput() */
1920 dquot_release(dquot);
1919 goto out; 1921 goto out;
1922 }
1920 ret = dquot_release(dquot); 1923 ret = dquot_release(dquot);
1921 err = 1924 err =
1922 journal_end(&th, dquot->dq_sb, 1925 journal_end(&th, dquot->dq_sb,
@@ -2067,6 +2070,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
2067 size_t towrite = len; 2070 size_t towrite = len;
2068 struct buffer_head tmp_bh, *bh; 2071 struct buffer_head tmp_bh, *bh;
2069 2072
2073 if (!current->journal_info) {
2074 printk(KERN_WARNING "reiserfs: Quota write (off=%Lu, len=%Lu)"
2075 " cancelled because transaction is not started.\n",
2076 (unsigned long long)off, (unsigned long long)len);
2077 return -EIO;
2078 }
2070 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 2079 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2071 while (towrite > 0) { 2080 while (towrite > 0) {
2072 tocopy = sb->s_blocksize - offset < towrite ? 2081 tocopy = sb->s_blocksize - offset < towrite ?
@@ -2098,7 +2107,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
2098 data += tocopy; 2107 data += tocopy;
2099 blk++; 2108 blk++;
2100 } 2109 }
2101 out: 2110out:
2102 if (len == towrite) 2111 if (len == towrite)
2103 return err; 2112 return err;
2104 if (inode->i_size < off + len - towrite) 2113 if (inode->i_size < off + len - towrite)
diff --git a/fs/select.c b/fs/select.c
index a974082b0824..46dca31c607a 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -26,8 +26,6 @@
26 26
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28 28
29#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
30
31struct poll_table_page { 29struct poll_table_page {
32 struct poll_table_page * next; 30 struct poll_table_page * next;
33 struct poll_table_entry * entry; 31 struct poll_table_entry * entry;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index a8e293d30034..aefb0be07942 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -11,8 +11,10 @@
11 * Now using anonymous inode source. 11 * Now using anonymous inode source.
12 * Thanks to Oleg Nesterov for useful code review and suggestions. 12 * Thanks to Oleg Nesterov for useful code review and suggestions.
13 * More comments and suggestions from Arnd Bergmann. 13 * More comments and suggestions from Arnd Bergmann.
14 * Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br> 14 * Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
15 * Retrieve multiple signals with one read() call 15 * Retrieve multiple signals with one read() call
16 * Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
17 * Attach to the sighand only during read() and poll().
16 */ 18 */
17 19
18#include <linux/file.h> 20#include <linux/file.h>
@@ -27,102 +29,12 @@
27#include <linux/signalfd.h> 29#include <linux/signalfd.h>
28 30
29struct signalfd_ctx { 31struct signalfd_ctx {
30 struct list_head lnk;
31 wait_queue_head_t wqh;
32 sigset_t sigmask; 32 sigset_t sigmask;
33 struct task_struct *tsk;
34}; 33};
35 34
36struct signalfd_lockctx {
37 struct task_struct *tsk;
38 unsigned long flags;
39};
40
41/*
42 * Tries to acquire the sighand lock. We do not increment the sighand
43 * use count, and we do not even pin the task struct, so we need to
44 * do it inside an RCU read lock, and we must be prepared for the
45 * ctx->tsk going to NULL (in signalfd_deliver()), and for the sighand
46 * being detached. We return 0 if the sighand has been detached, or
47 * 1 if we were able to pin the sighand lock.
48 */
49static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk)
50{
51 struct sighand_struct *sighand = NULL;
52
53 rcu_read_lock();
54 lk->tsk = rcu_dereference(ctx->tsk);
55 if (likely(lk->tsk != NULL))
56 sighand = lock_task_sighand(lk->tsk, &lk->flags);
57 rcu_read_unlock();
58
59 if (!sighand)
60 return 0;
61
62 if (!ctx->tsk) {
63 unlock_task_sighand(lk->tsk, &lk->flags);
64 return 0;
65 }
66
67 if (lk->tsk->tgid == current->tgid)
68 lk->tsk = current;
69
70 return 1;
71}
72
73static void signalfd_unlock(struct signalfd_lockctx *lk)
74{
75 unlock_task_sighand(lk->tsk, &lk->flags);
76}
77
78/*
79 * This must be called with the sighand lock held.
80 */
81void signalfd_deliver(struct task_struct *tsk, int sig)
82{
83 struct sighand_struct *sighand = tsk->sighand;
84 struct signalfd_ctx *ctx, *tmp;
85
86 BUG_ON(!sig);
87 list_for_each_entry_safe(ctx, tmp, &sighand->signalfd_list, lnk) {
88 /*
89 * We use a negative signal value as a way to broadcast that the
90 * sighand has been orphaned, so that we can notify all the
91 * listeners about this. Remember the ctx->sigmask is inverted,
92 * so if the user is interested in a signal, that corresponding
93 * bit will be zero.
94 */
95 if (sig < 0) {
96 if (ctx->tsk == tsk) {
97 ctx->tsk = NULL;
98 list_del_init(&ctx->lnk);
99 wake_up(&ctx->wqh);
100 }
101 } else {
102 if (!sigismember(&ctx->sigmask, sig))
103 wake_up(&ctx->wqh);
104 }
105 }
106}
107
108static void signalfd_cleanup(struct signalfd_ctx *ctx)
109{
110 struct signalfd_lockctx lk;
111
112 /*
113 * This is tricky. If the sighand is gone, we do not need to remove
114 * context from the list, the list itself won't be there anymore.
115 */
116 if (signalfd_lock(ctx, &lk)) {
117 list_del(&ctx->lnk);
118 signalfd_unlock(&lk);
119 }
120 kfree(ctx);
121}
122
123static int signalfd_release(struct inode *inode, struct file *file) 35static int signalfd_release(struct inode *inode, struct file *file)
124{ 36{
125 signalfd_cleanup(file->private_data); 37 kfree(file->private_data);
126 return 0; 38 return 0;
127} 39}
128 40
@@ -130,23 +42,15 @@ static unsigned int signalfd_poll(struct file *file, poll_table *wait)
130{ 42{
131 struct signalfd_ctx *ctx = file->private_data; 43 struct signalfd_ctx *ctx = file->private_data;
132 unsigned int events = 0; 44 unsigned int events = 0;
133 struct signalfd_lockctx lk;
134 45
135 poll_wait(file, &ctx->wqh, wait); 46 poll_wait(file, &current->sighand->signalfd_wqh, wait);
136 47
137 /* 48 spin_lock_irq(&current->sighand->siglock);
138 * Let the caller get a POLLIN in this case, ala socket recv() when 49 if (next_signal(&current->pending, &ctx->sigmask) ||
139 * the peer disconnects. 50 next_signal(&current->signal->shared_pending,
140 */ 51 &ctx->sigmask))
141 if (signalfd_lock(ctx, &lk)) {
142 if ((lk.tsk == current &&
143 next_signal(&lk.tsk->pending, &ctx->sigmask) > 0) ||
144 next_signal(&lk.tsk->signal->shared_pending,
145 &ctx->sigmask) > 0)
146 events |= POLLIN;
147 signalfd_unlock(&lk);
148 } else
149 events |= POLLIN; 52 events |= POLLIN;
53 spin_unlock_irq(&current->sighand->siglock);
150 54
151 return events; 55 return events;
152} 56}
@@ -219,59 +123,46 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
219 int nonblock) 123 int nonblock)
220{ 124{
221 ssize_t ret; 125 ssize_t ret;
222 struct signalfd_lockctx lk;
223 DECLARE_WAITQUEUE(wait, current); 126 DECLARE_WAITQUEUE(wait, current);
224 127
225 if (!signalfd_lock(ctx, &lk)) 128 spin_lock_irq(&current->sighand->siglock);
226 return 0; 129 ret = dequeue_signal(current, &ctx->sigmask, info);
227
228 ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
229 switch (ret) { 130 switch (ret) {
230 case 0: 131 case 0:
231 if (!nonblock) 132 if (!nonblock)
232 break; 133 break;
233 ret = -EAGAIN; 134 ret = -EAGAIN;
234 default: 135 default:
235 signalfd_unlock(&lk); 136 spin_unlock_irq(&current->sighand->siglock);
236 return ret; 137 return ret;
237 } 138 }
238 139
239 add_wait_queue(&ctx->wqh, &wait); 140 add_wait_queue(&current->sighand->signalfd_wqh, &wait);
240 for (;;) { 141 for (;;) {
241 set_current_state(TASK_INTERRUPTIBLE); 142 set_current_state(TASK_INTERRUPTIBLE);
242 ret = dequeue_signal(lk.tsk, &ctx->sigmask, info); 143 ret = dequeue_signal(current, &ctx->sigmask, info);
243 signalfd_unlock(&lk);
244 if (ret != 0) 144 if (ret != 0)
245 break; 145 break;
246 if (signal_pending(current)) { 146 if (signal_pending(current)) {
247 ret = -ERESTARTSYS; 147 ret = -ERESTARTSYS;
248 break; 148 break;
249 } 149 }
150 spin_unlock_irq(&current->sighand->siglock);
250 schedule(); 151 schedule();
251 ret = signalfd_lock(ctx, &lk); 152 spin_lock_irq(&current->sighand->siglock);
252 if (unlikely(!ret)) {
253 /*
254 * Let the caller read zero byte, ala socket
255 * recv() when the peer disconnect. This test
256 * must be done before doing a dequeue_signal(),
257 * because if the sighand has been orphaned,
258 * the dequeue_signal() call is going to crash
259 * because ->sighand will be long gone.
260 */
261 break;
262 }
263 } 153 }
154 spin_unlock_irq(&current->sighand->siglock);
264 155
265 remove_wait_queue(&ctx->wqh, &wait); 156 remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
266 __set_current_state(TASK_RUNNING); 157 __set_current_state(TASK_RUNNING);
267 158
268 return ret; 159 return ret;
269} 160}
270 161
271/* 162/*
272 * Returns either the size of a "struct signalfd_siginfo", or zero if the 163 * Returns a multiple of the size of a "struct signalfd_siginfo", or a negative
273 * sighand we are attached to, has been orphaned. The "count" parameter 164 * error code. The "count" parameter must be at least the size of a
274 * must be at least the size of a "struct signalfd_siginfo". 165 * "struct signalfd_siginfo".
275 */ 166 */
276static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, 167static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
277 loff_t *ppos) 168 loff_t *ppos)
@@ -287,7 +178,6 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
287 return -EINVAL; 178 return -EINVAL;
288 179
289 siginfo = (struct signalfd_siginfo __user *) buf; 180 siginfo = (struct signalfd_siginfo __user *) buf;
290
291 do { 181 do {
292 ret = signalfd_dequeue(ctx, &info, nonblock); 182 ret = signalfd_dequeue(ctx, &info, nonblock);
293 if (unlikely(ret <= 0)) 183 if (unlikely(ret <= 0))
@@ -300,7 +190,7 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
300 nonblock = 1; 190 nonblock = 1;
301 } while (--count); 191 } while (--count);
302 192
303 return total ? total : ret; 193 return total ? total: ret;
304} 194}
305 195
306static const struct file_operations signalfd_fops = { 196static const struct file_operations signalfd_fops = {
@@ -309,20 +199,13 @@ static const struct file_operations signalfd_fops = {
309 .read = signalfd_read, 199 .read = signalfd_read,
310}; 200};
311 201
312/*
313 * Create a file descriptor that is associated with our signal
314 * state. We can pass it around to others if we want to, but
315 * it will always be _our_ signal state.
316 */
317asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask) 202asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask)
318{ 203{
319 int error; 204 int error;
320 sigset_t sigmask; 205 sigset_t sigmask;
321 struct signalfd_ctx *ctx; 206 struct signalfd_ctx *ctx;
322 struct sighand_struct *sighand;
323 struct file *file; 207 struct file *file;
324 struct inode *inode; 208 struct inode *inode;
325 struct signalfd_lockctx lk;
326 209
327 if (sizemask != sizeof(sigset_t) || 210 if (sizemask != sizeof(sigset_t) ||
328 copy_from_user(&sigmask, user_mask, sizeof(sigmask))) 211 copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
@@ -335,17 +218,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
335 if (!ctx) 218 if (!ctx)
336 return -ENOMEM; 219 return -ENOMEM;
337 220
338 init_waitqueue_head(&ctx->wqh);
339 ctx->sigmask = sigmask; 221 ctx->sigmask = sigmask;
340 ctx->tsk = current->group_leader;
341
342 sighand = current->sighand;
343 /*
344 * Add this fd to the list of signal listeners.
345 */
346 spin_lock_irq(&sighand->siglock);
347 list_add_tail(&ctx->lnk, &sighand->signalfd_list);
348 spin_unlock_irq(&sighand->siglock);
349 222
350 /* 223 /*
351 * When we call this, the initialization must be complete, since 224 * When we call this, the initialization must be complete, since
@@ -364,23 +237,18 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
364 fput(file); 237 fput(file);
365 return -EINVAL; 238 return -EINVAL;
366 } 239 }
367 /* 240 spin_lock_irq(&current->sighand->siglock);
368 * We need to be prepared of the fact that the sighand this fd 241 ctx->sigmask = sigmask;
369 * is attached to, has been detched. In that case signalfd_lock() 242 spin_unlock_irq(&current->sighand->siglock);
370 * will return 0, and we'll just skip setting the new mask. 243
371 */ 244 wake_up(&current->sighand->signalfd_wqh);
372 if (signalfd_lock(ctx, &lk)) {
373 ctx->sigmask = sigmask;
374 signalfd_unlock(&lk);
375 }
376 wake_up(&ctx->wqh);
377 fput(file); 245 fput(file);
378 } 246 }
379 247
380 return ufd; 248 return ufd;
381 249
382err_fdalloc: 250err_fdalloc:
383 signalfd_cleanup(ctx); 251 kfree(ctx);
384 return error; 252 return error;
385} 253}
386 254
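After this rewrite a signalfd no longer keeps a reference to the sighand of the task that created it: poll() and read() simply look at the pending queues of whichever task performs the call, and read() returns some multiple of sizeof(struct signalfd_siginfo). A minimal userspace sketch of the intended usage (the glibc signalfd() wrapper shown here postdates this kernel; with the raw sys_signalfd of this release there is no flags argument, so the wrapper must be called with flags set to 0):

#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo si;
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	/* Block the signal normally so only the signalfd sees it. */
	if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) {
		perror("sigprocmask");
		exit(1);
	}
	fd = signalfd(-1, &mask, 0);
	if (fd == -1) {
		perror("signalfd");
		exit(1);
	}
	/* read() blocks until a SIGINT is pending for this task, then
	 * returns one (or more) signalfd_siginfo records. */
	if (read(fd, &si, sizeof(si)) == sizeof(si))
		printf("got signal %u from pid %u\n", si.ssi_signo, si.ssi_pid);
	close(fd);
	return 0;
}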
diff --git a/fs/splice.c b/fs/splice.c
index c010a72ca2d2..e95a36228863 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1224,6 +1224,33 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1224} 1224}
1225 1225
1226/* 1226/*
1227 * Do a copy-from-user while holding the mmap_semaphore for reading, in a
1228 * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
1229 * for writing) and page faulting on the user memory pointed to by src.
1230 * This assumes that we will very rarely hit the partial != 0 path, or this
1231 * will not be a win.
1232 */
1233static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
1234{
1235 int partial;
1236
1237 pagefault_disable();
1238 partial = __copy_from_user_inatomic(dst, src, n);
1239 pagefault_enable();
1240
1241 /*
1242 * Didn't copy everything, drop the mmap_sem and do a faulting copy
1243 */
1244 if (unlikely(partial)) {
1245 up_read(&current->mm->mmap_sem);
1246 partial = copy_from_user(dst, src, n);
1247 down_read(&current->mm->mmap_sem);
1248 }
1249
1250 return partial;
1251}
1252
1253/*
1227 * Map an iov into an array of pages and offset/length tuples. With the 1254 * Map an iov into an array of pages and offset/length tuples. With the
1228 * partial_page structure, we can map several non-contiguous ranges into 1255 * partial_page structure, we can map several non-contiguous ranges into
1229 * our one pages[] map instead of splitting that operation into pieces. 1256 * our one pages[] map instead of splitting that operation into pieces.
@@ -1236,31 +1263,26 @@ static int get_iovec_page_array(const struct iovec __user *iov,
1236{ 1263{
1237 int buffers = 0, error = 0; 1264 int buffers = 0, error = 0;
1238 1265
1239 /*
1240 * It's ok to take the mmap_sem for reading, even
1241 * across a "get_user()".
1242 */
1243 down_read(&current->mm->mmap_sem); 1266 down_read(&current->mm->mmap_sem);
1244 1267
1245 while (nr_vecs) { 1268 while (nr_vecs) {
1246 unsigned long off, npages; 1269 unsigned long off, npages;
1270 struct iovec entry;
1247 void __user *base; 1271 void __user *base;
1248 size_t len; 1272 size_t len;
1249 int i; 1273 int i;
1250 1274
1251 /* 1275 error = -EFAULT;
1252 * Get user address base and length for this iovec. 1276 if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
1253 */
1254 error = get_user(base, &iov->iov_base);
1255 if (unlikely(error))
1256 break;
1257 error = get_user(len, &iov->iov_len);
1258 if (unlikely(error))
1259 break; 1277 break;
1260 1278
1279 base = entry.iov_base;
1280 len = entry.iov_len;
1281
1261 /* 1282 /*
1262 * Sanity check this iovec. 0 read succeeds. 1283 * Sanity check this iovec. 0 read succeeds.
1263 */ 1284 */
1285 error = 0;
1264 if (unlikely(!len)) 1286 if (unlikely(!len))
1265 break; 1287 break;
1266 error = -EFAULT; 1288 error = -EFAULT;
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 135353f8a296..5afe2a26f5d8 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -248,12 +248,7 @@ int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
248 248
249void sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr) 249void sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr)
250{ 250{
251 if (sysfs_hash_and_remove(kobj->sd, attr->attr.name) < 0) { 251 sysfs_hash_and_remove(kobj->sd, attr->attr.name);
252 printk(KERN_ERR "%s: "
253 "bad dentry or inode or no such file: \"%s\"\n",
254 __FUNCTION__, attr->attr.name);
255 dump_stack();
256 }
257} 252}
258 253
259EXPORT_SYMBOL_GPL(sysfs_create_bin_file); 254EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 048e6054c2fd..83e76b3813c9 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -762,12 +762,15 @@ static int sysfs_count_nlink(struct sysfs_dirent *sd)
762static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry, 762static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
763 struct nameidata *nd) 763 struct nameidata *nd)
764{ 764{
765 struct dentry *ret = NULL;
765 struct sysfs_dirent * parent_sd = dentry->d_parent->d_fsdata; 766 struct sysfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
766 struct sysfs_dirent * sd; 767 struct sysfs_dirent * sd;
767 struct bin_attribute *bin_attr; 768 struct bin_attribute *bin_attr;
768 struct inode *inode; 769 struct inode *inode;
769 int found = 0; 770 int found = 0;
770 771
772 mutex_lock(&sysfs_mutex);
773
771 for (sd = parent_sd->s_children; sd; sd = sd->s_sibling) { 774 for (sd = parent_sd->s_children; sd; sd = sd->s_sibling) {
772 if (sysfs_type(sd) && 775 if (sysfs_type(sd) &&
773 !strcmp(sd->s_name, dentry->d_name.name)) { 776 !strcmp(sd->s_name, dentry->d_name.name)) {
@@ -778,14 +781,14 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
778 781
779 /* no such entry */ 782 /* no such entry */
780 if (!found) 783 if (!found)
781 return NULL; 784 goto out_unlock;
782 785
783 /* attach dentry and inode */ 786 /* attach dentry and inode */
784 inode = sysfs_get_inode(sd); 787 inode = sysfs_get_inode(sd);
785 if (!inode) 788 if (!inode) {
786 return ERR_PTR(-ENOMEM); 789 ret = ERR_PTR(-ENOMEM);
787 790 goto out_unlock;
788 mutex_lock(&sysfs_mutex); 791 }
789 792
790 if (inode->i_state & I_NEW) { 793 if (inode->i_state & I_NEW) {
791 /* initialize inode according to type */ 794 /* initialize inode according to type */
@@ -815,9 +818,9 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
815 sysfs_instantiate(dentry, inode); 818 sysfs_instantiate(dentry, inode);
816 sysfs_attach_dentry(sd, dentry); 819 sysfs_attach_dentry(sd, dentry);
817 820
821 out_unlock:
818 mutex_unlock(&sysfs_mutex); 822 mutex_unlock(&sysfs_mutex);
819 823 return ret;
820 return NULL;
821} 824}
822 825
823const struct inode_operations sysfs_dir_inode_operations = { 826const struct inode_operations sysfs_dir_inode_operations = {
@@ -942,6 +945,8 @@ int sysfs_rename_dir(struct kobject *kobj, struct sysfs_dirent *new_parent_sd,
942 if (error) 945 if (error)
943 goto out_drop; 946 goto out_drop;
944 947
948 mutex_lock(&sysfs_mutex);
949
945 dup_name = sd->s_name; 950 dup_name = sd->s_name;
946 sd->s_name = new_name; 951 sd->s_name = new_name;
947 952
@@ -949,8 +954,6 @@ int sysfs_rename_dir(struct kobject *kobj, struct sysfs_dirent *new_parent_sd,
949 d_add(new_dentry, NULL); 954 d_add(new_dentry, NULL);
950 d_move(sd->s_dentry, new_dentry); 955 d_move(sd->s_dentry, new_dentry);
951 956
952 mutex_lock(&sysfs_mutex);
953
954 sysfs_unlink_sibling(sd); 957 sysfs_unlink_sibling(sd);
955 sysfs_get(new_parent_sd); 958 sysfs_get(new_parent_sd);
956 sysfs_put(sd->s_parent); 959 sysfs_put(sd->s_parent);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 276f7207a564..87e87dcd3f9c 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -540,26 +540,24 @@ static void udf_table_free_blocks(struct super_block *sb,
540 if (epos.offset + adsize > sb->s_blocksize) { 540 if (epos.offset + adsize > sb->s_blocksize) {
541 loffset = epos.offset; 541 loffset = epos.offset;
542 aed->lengthAllocDescs = cpu_to_le32(adsize); 542 aed->lengthAllocDescs = cpu_to_le32(adsize);
543 sptr = UDF_I_DATA(inode) + epos.offset - 543 sptr = UDF_I_DATA(table) + epos.offset - adsize;
544 udf_file_entry_alloc_offset(inode) +
545 UDF_I_LENEATTR(inode) - adsize;
546 dptr = epos.bh->b_data + sizeof(struct allocExtDesc); 544 dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
547 memcpy(dptr, sptr, adsize); 545 memcpy(dptr, sptr, adsize);
548 epos.offset = sizeof(struct allocExtDesc) + adsize; 546 epos.offset = sizeof(struct allocExtDesc) + adsize;
549 } else { 547 } else {
550 loffset = epos.offset + adsize; 548 loffset = epos.offset + adsize;
551 aed->lengthAllocDescs = cpu_to_le32(0); 549 aed->lengthAllocDescs = cpu_to_le32(0);
552 sptr = oepos.bh->b_data + epos.offset;
553 epos.offset = sizeof(struct allocExtDesc);
554
555 if (oepos.bh) { 550 if (oepos.bh) {
551 sptr = oepos.bh->b_data + epos.offset;
556 aed = (struct allocExtDesc *)oepos.bh->b_data; 552 aed = (struct allocExtDesc *)oepos.bh->b_data;
557 aed->lengthAllocDescs = 553 aed->lengthAllocDescs =
558 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); 554 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
559 } else { 555 } else {
556 sptr = UDF_I_DATA(table) + epos.offset;
560 UDF_I_LENALLOC(table) += adsize; 557 UDF_I_LENALLOC(table) += adsize;
561 mark_inode_dirty(table); 558 mark_inode_dirty(table);
562 } 559 }
560 epos.offset = sizeof(struct allocExtDesc);
563 } 561 }
564 if (UDF_SB_UDFREV(sb) >= 0x0200) 562 if (UDF_SB_UDFREV(sb) >= 0x0200)
565 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1, 563 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 382be7be5ae3..c68a6e730b97 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -89,7 +89,7 @@ static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
89static void udf_load_pvoldesc(struct super_block *, struct buffer_head *); 89static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
90static void udf_load_fileset(struct super_block *, struct buffer_head *, 90static void udf_load_fileset(struct super_block *, struct buffer_head *,
91 kernel_lb_addr *); 91 kernel_lb_addr *);
92static void udf_load_partdesc(struct super_block *, struct buffer_head *); 92static int udf_load_partdesc(struct super_block *, struct buffer_head *);
93static void udf_open_lvid(struct super_block *); 93static void udf_open_lvid(struct super_block *);
94static void udf_close_lvid(struct super_block *); 94static void udf_close_lvid(struct super_block *);
95static unsigned int udf_count_free(struct super_block *); 95static unsigned int udf_count_free(struct super_block *);
@@ -877,7 +877,7 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
877 root->logicalBlockNum, root->partitionReferenceNum); 877 root->logicalBlockNum, root->partitionReferenceNum);
878} 878}
879 879
880static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh) 880static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
881{ 881{
882 struct partitionDesc *p; 882 struct partitionDesc *p;
883 int i; 883 int i;
@@ -912,6 +912,11 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
912 912
913 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table = 913 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
914 udf_iget(sb, loc); 914 udf_iget(sb, loc);
915 if (!UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table) {
916 udf_debug("cannot load unallocSpaceTable (part %d)\n",
917 i);
918 return 1;
919 }
915 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE; 920 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
916 udf_debug("unallocSpaceTable (part %d) @ %ld\n", 921 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
917 i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino); 922 i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
@@ -938,6 +943,11 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
938 943
939 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table = 944 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
940 udf_iget(sb, loc); 945 udf_iget(sb, loc);
946 if (!UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table) {
947 udf_debug("cannot load freedSpaceTable (part %d)\n",
948 i);
949 return 1;
950 }
941 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE; 951 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
942 udf_debug("freedSpaceTable (part %d) @ %ld\n", 952 udf_debug("freedSpaceTable (part %d) @ %ld\n",
943 i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino); 953 i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
@@ -966,6 +976,7 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
966 le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i), 976 le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
967 UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i)); 977 UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
968 } 978 }
979 return 0;
969} 980}
970 981
971static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh, 982static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
@@ -1177,12 +1188,19 @@ static int udf_process_sequence(struct super_block *sb, long block, long lastblo
1177 udf_load_logicalvol(sb, bh, fileset); 1188 udf_load_logicalvol(sb, bh, fileset);
1178 } else if (i == VDS_POS_PARTITION_DESC) { 1189 } else if (i == VDS_POS_PARTITION_DESC) {
1179 struct buffer_head *bh2 = NULL; 1190 struct buffer_head *bh2 = NULL;
1180 udf_load_partdesc(sb, bh); 1191 if (udf_load_partdesc(sb, bh)) {
1192 brelse(bh);
1193 return 1;
1194 }
1181 for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) { 1195 for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) {
1182 bh2 = udf_read_tagged(sb, j, j, &ident); 1196 bh2 = udf_read_tagged(sb, j, j, &ident);
1183 gd = (struct generic_desc *)bh2->b_data; 1197 gd = (struct generic_desc *)bh2->b_data;
1184 if (ident == TAG_IDENT_PD) 1198 if (ident == TAG_IDENT_PD)
1185 udf_load_partdesc(sb, bh2); 1199 if (udf_load_partdesc(sb, bh2)) {
1200 brelse(bh);
1201 brelse(bh2);
1202 return 1;
1203 }
1186 brelse(bh2); 1204 brelse(bh2);
1187 } 1205 }
1188 } 1206 }
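The udf_load_partdesc() hunks above follow a common error-propagation rework: a loader that used to return void now returns non-zero on failure, and udf_process_sequence() releases the buffers it holds and bails out instead of carrying on with a half-initialised partition. Below is a minimal user-space sketch of that pattern only; load_desc, process_sequence and struct desc are invented names, not the UDF code.

#include <stdio.h>
#include <stdlib.h>

struct desc { int id; void *payload; };

/* Loader reports failure instead of silently leaving state half set up. */
static int load_desc(struct desc *d)
{
        d->payload = malloc(64);
        if (!d->payload) {
                fprintf(stderr, "cannot load descriptor %d\n", d->id);
                return 1;
        }
        return 0;
}

/* Caller checks every loader call and unwinds whatever it already holds. */
static int process_sequence(struct desc *descs, int n)
{
        for (int i = 0; i < n; i++) {
                if (load_desc(&descs[i])) {
                        while (i--)
                                free(descs[i].payload);
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        struct desc ds[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

        if (process_sequence(ds, 3))
                return EXIT_FAILURE;
        for (int i = 0; i < 3; i++)
                free(ds[i].payload);
        return EXIT_SUCCESS;
}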
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 73402c5eeb8a..38eb0b7a1f3d 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -894,7 +894,7 @@ magic_found:
894 goto again; 894 goto again;
895 } 895 }
896 896
897 897 sbi->s_flags = flags;/*after that line some functions use s_flags*/
898 ufs_print_super_stuff(sb, usb1, usb2, usb3); 898 ufs_print_super_stuff(sb, usb1, usb2, usb3);
899 899
900 /* 900 /*
@@ -1025,8 +1025,6 @@ magic_found:
1025 UFS_MOUNT_UFSTYPE_44BSD) 1025 UFS_MOUNT_UFSTYPE_44BSD)
1026 uspi->s_maxsymlinklen = 1026 uspi->s_maxsymlinklen =
1027 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); 1027 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
1028
1029 sbi->s_flags = flags;
1030 1028
1031 inode = iget(sb, UFS_ROOTINO); 1029 inode = iget(sb, UFS_ROOTINO);
1032 if (!inode || is_bad_inode(inode)) 1030 if (!inode || is_bad_inode(inode))
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index b4acc7f3c374..e6ea293f303c 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
103static inline int 103static inline int
104kmem_shake_allow(gfp_t gfp_mask) 104kmem_shake_allow(gfp_t gfp_mask)
105{ 105{
106 return (gfp_mask & __GFP_WAIT); 106 return (gfp_mask & __GFP_WAIT) != 0;
107} 107}
108 108
109#endif /* __XFS_SUPPORT_KMEM_H__ */ 109#endif /* __XFS_SUPPORT_KMEM_H__ */
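The kmem_shake_allow() change above adds "!= 0" so the function returns a clean 0/1 int rather than the raw masked bit value. A tiny stand-alone illustration of the same idiom; FLAG_WAIT and may_wait are made-up names, not the kernel gfp flags.

#include <stdio.h>

#define FLAG_WAIT 0x10u

static int may_wait(unsigned int mask)
{
        /* (mask & FLAG_WAIT) would be 0 or 0x10; comparing against zero
         * normalises the result to 0 or 1 for an int return value. */
        return (mask & FLAG_WAIT) != 0;
}

int main(void)
{
        printf("%d %d\n", may_wait(0x10), may_wait(0x01)); /* prints "1 0" */
        return 0;
}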
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fd4105d662e0..5f152f60d74d 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -181,6 +181,7 @@ xfs_setfilesize(
181 ip->i_d.di_size = isize; 181 ip->i_d.di_size = isize;
182 ip->i_update_core = 1; 182 ip->i_update_core = 1;
183 ip->i_update_size = 1; 183 ip->i_update_size = 1;
184 mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
184 } 185 }
185 186
186 xfs_iunlock(ip, XFS_ILOCK_EXCL); 187 xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -652,7 +653,7 @@ xfs_probe_cluster(
652 653
653 for (i = 0; i < pagevec_count(&pvec); i++) { 654 for (i = 0; i < pagevec_count(&pvec); i++) {
654 struct page *page = pvec.pages[i]; 655 struct page *page = pvec.pages[i];
655 size_t pg_offset, len = 0; 656 size_t pg_offset, pg_len = 0;
656 657
657 if (tindex == tlast) { 658 if (tindex == tlast) {
658 pg_offset = 659 pg_offset =
@@ -665,16 +666,16 @@ xfs_probe_cluster(
665 pg_offset = PAGE_CACHE_SIZE; 666 pg_offset = PAGE_CACHE_SIZE;
666 667
667 if (page->index == tindex && !TestSetPageLocked(page)) { 668 if (page->index == tindex && !TestSetPageLocked(page)) {
668 len = xfs_probe_page(page, pg_offset, mapped); 669 pg_len = xfs_probe_page(page, pg_offset, mapped);
669 unlock_page(page); 670 unlock_page(page);
670 } 671 }
671 672
672 if (!len) { 673 if (!pg_len) {
673 done = 1; 674 done = 1;
674 break; 675 break;
675 } 676 }
676 677
677 total += len; 678 total += pg_len;
678 tindex++; 679 tindex++;
679 } 680 }
680 681
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index bb72c3d4141f..81565dea9af7 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -46,7 +46,7 @@ xfs_param_t xfs_params = {
46 .inherit_nosym = { 0, 0, 1 }, 46 .inherit_nosym = { 0, 0, 1 },
47 .rotorstep = { 1, 1, 255 }, 47 .rotorstep = { 1, 1, 255 },
48 .inherit_nodfrg = { 0, 1, 1 }, 48 .inherit_nodfrg = { 0, 1, 1 },
49 .fstrm_timer = { 1, 50, 3600*100}, 49 .fstrm_timer = { 1, 30*100, 3600*100},
50}; 50};
51 51
52/* 52/*
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 4528f9a3f304..491d1f4f202d 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -415,8 +415,10 @@ xfs_fs_write_inode(
415 415
416 if (vp) { 416 if (vp) {
417 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); 417 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
418 if (sync) 418 if (sync) {
419 filemap_fdatawait(inode->i_mapping);
419 flags |= FLUSH_SYNC; 420 flags |= FLUSH_SYNC;
421 }
420 error = bhv_vop_iflush(vp, flags); 422 error = bhv_vop_iflush(vp, flags);
421 if (error == EAGAIN) 423 if (error == EAGAIN)
422 error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0; 424 error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 2d274b23ade5..6ff0f4de1630 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -120,7 +120,8 @@ xfs_Gqm_init(void)
120 * Initialize the dquot hash tables. 120 * Initialize the dquot hash tables.
121 */ 121 */
122 udqhash = kmem_zalloc_greedy(&hsize, 122 udqhash = kmem_zalloc_greedy(&hsize,
123 XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH, 123 XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
124 XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
124 KM_SLEEP | KM_MAYFAIL | KM_LARGE); 125 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
125 gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE); 126 gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
126 hsize /= sizeof(xfs_dqhash_t); 127 hsize /= sizeof(xfs_dqhash_t);
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index a27a7c8c0526..855da0408647 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -34,10 +34,10 @@ extern void cmn_err(int, char *, ...)
34extern void assfail(char *expr, char *f, int l); 34extern void assfail(char *expr, char *f, int l);
35 35
36#define ASSERT_ALWAYS(expr) \ 36#define ASSERT_ALWAYS(expr) \
37 (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 37 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
38 38
39#ifndef DEBUG 39#ifndef DEBUG
40# define ASSERT(expr) ((void)0) 40#define ASSERT(expr) ((void)0)
41 41
42#ifndef STATIC 42#ifndef STATIC
43# define STATIC static noinline 43# define STATIC static noinline
@@ -49,8 +49,10 @@ extern void assfail(char *expr, char *f, int l);
49 49
50#else /* DEBUG */ 50#else /* DEBUG */
51 51
52# define ASSERT(expr) ASSERT_ALWAYS(expr) 52#include <linux/random.h>
53# include <linux/random.h> 53
54#define ASSERT(expr) \
55 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
54 56
55#ifndef STATIC 57#ifndef STATIC
56# define STATIC noinline 58# define STATIC noinline
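The debug.h hunks above rework how ASSERT is defined in DEBUG builds, expanding the check directly rather than going through ASSERT_ALWAYS. The general shape of a compile-time-switchable assert, shown here as a self-contained user-space sketch with invented names (my_assfail, MY_ASSERT); this illustrates the pattern only, it is not the XFS macros.

#include <stdio.h>
#include <stdlib.h>

static void my_assfail(const char *expr, const char *file, int line)
{
        fprintf(stderr, "assertion failed: %s, file: %s, line: %d\n",
                expr, file, line);
        abort();
}

/* Always checked, regardless of build type. */
#define MY_ASSERT_ALWAYS(expr) \
        ((expr) ? (void)0 : my_assfail(#expr, __FILE__, __LINE__))

/* Checked only when DEBUG is defined; compiles away otherwise. */
#ifdef DEBUG
# define MY_ASSERT(expr) MY_ASSERT_ALWAYS(expr)
#else
# define MY_ASSERT(expr) ((void)0)
#endif

int main(void)
{
        MY_ASSERT(1 + 1 == 2);        /* no-op unless built with -DDEBUG */
        MY_ASSERT_ALWAYS(2 + 2 == 4); /* checked in every build */
        return 0;
}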
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index aea37df4aa62..26d09e2e1a7f 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1975,7 +1975,6 @@ xfs_da_do_buf(
1975 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED); 1975 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
1976 if (unlikely(error == EFSCORRUPTED)) { 1976 if (unlikely(error == EFSCORRUPTED)) {
1977 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 1977 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
1978 int i;
1979 cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n", 1978 cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
1980 (long long)bno); 1979 (long long)bno);
1981 cmn_err(CE_ALERT, "dir: inode %lld\n", 1980 cmn_err(CE_ALERT, "dir: inode %lld\n",
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce2278611bb7..36d8f6aa11af 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -350,9 +350,10 @@ _xfs_filestream_update_ag(
350/* xfs_fstrm_free_func(): callback for freeing cached stream items. */ 350/* xfs_fstrm_free_func(): callback for freeing cached stream items. */
351void 351void
352xfs_fstrm_free_func( 352xfs_fstrm_free_func(
353 xfs_ino_t ino, 353 unsigned long ino,
354 fstrm_item_t *item) 354 void *data)
355{ 355{
356 fstrm_item_t *item = (fstrm_item_t *)data;
356 xfs_inode_t *ip = item->ip; 357 xfs_inode_t *ip = item->ip;
357 int ref; 358 int ref;
358 359
@@ -438,7 +439,7 @@ xfs_filestream_mount(
438 grp_count = 10; 439 grp_count = 10;
439 440
440 err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count, 441 err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
441 (xfs_mru_cache_free_func_t)xfs_fstrm_free_func); 442 xfs_fstrm_free_func);
442 443
443 return err; 444 return err;
444} 445}
@@ -467,8 +468,7 @@ void
467xfs_filestream_flush( 468xfs_filestream_flush(
468 xfs_mount_t *mp) 469 xfs_mount_t *mp)
469{ 470{
470 /* point in time flush, so keep the reaper running */ 471 xfs_mru_cache_flush(mp->m_filestream);
471 xfs_mru_cache_flush(mp->m_filestream, 1);
472} 472}
473 473
474/* 474/*
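The xfs_fstrm_free_func() change above gives the callback the cache's generic (unsigned long, void *) signature and casts the data pointer inside the function, instead of casting the function pointer at the registration site; calling a function through a mismatched pointer type is undefined behaviour in C, while casting the argument is well defined. A short sketch of that design choice, with invented names (free_func_t, item_free, walk):

#include <stdio.h>
#include <stdlib.h>

/* Generic callback type, analogous in shape to xfs_mru_cache_free_func_t. */
typedef void (*free_func_t)(unsigned long key, void *data);

struct item { int refcount; };

/* The callback keeps the generic signature and casts internally. */
static void item_free(unsigned long key, void *data)
{
        struct item *it = data;

        printf("freeing key %lu, refcount %d\n", key, it->refcount);
        free(it);
}

static void walk(unsigned long key, void *data, free_func_t fn)
{
        fn(key, data);          /* no function-pointer cast needed */
}

int main(void)
{
        struct item *it = malloc(sizeof(*it));

        if (!it)
                return EXIT_FAILURE;
        it->refcount = 1;
        walk(42, it, item_free);
        return EXIT_SUCCESS;
}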
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9d4c4fbeb3ee..9bfb69e1e885 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2185,13 +2185,13 @@ xlog_state_do_callback(
2185 } 2185 }
2186 cb = iclog->ic_callback; 2186 cb = iclog->ic_callback;
2187 2187
2188 while (cb != 0) { 2188 while (cb) {
2189 iclog->ic_callback_tail = &(iclog->ic_callback); 2189 iclog->ic_callback_tail = &(iclog->ic_callback);
2190 iclog->ic_callback = NULL; 2190 iclog->ic_callback = NULL;
2191 LOG_UNLOCK(log, s); 2191 LOG_UNLOCK(log, s);
2192 2192
2193 /* perform callbacks in the order given */ 2193 /* perform callbacks in the order given */
2194 for (; cb != 0; cb = cb_next) { 2194 for (; cb; cb = cb_next) {
2195 cb_next = cb->cb_next; 2195 cb_next = cb->cb_next;
2196 cb->cb_func(cb->cb_arg, aborted); 2196 cb->cb_func(cb->cb_arg, aborted);
2197 } 2197 }
@@ -2202,7 +2202,7 @@ xlog_state_do_callback(
2202 loopdidcallbacks++; 2202 loopdidcallbacks++;
2203 funcdidcallbacks++; 2203 funcdidcallbacks++;
2204 2204
2205 ASSERT(iclog->ic_callback == 0); 2205 ASSERT(iclog->ic_callback == NULL);
2206 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) 2206 if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2207 iclog->ic_state = XLOG_STATE_DIRTY; 2207 iclog->ic_state = XLOG_STATE_DIRTY;
2208 2208
@@ -3242,10 +3242,10 @@ xlog_ticket_put(xlog_t *log,
3242#else 3242#else
3243 /* When we debug, it is easier if tickets are cycled */ 3243 /* When we debug, it is easier if tickets are cycled */
3244 ticket->t_next = NULL; 3244 ticket->t_next = NULL;
3245 if (log->l_tail != 0) { 3245 if (log->l_tail) {
3246 log->l_tail->t_next = ticket; 3246 log->l_tail->t_next = ticket;
3247 } else { 3247 } else {
3248 ASSERT(log->l_freelist == 0); 3248 ASSERT(log->l_freelist == NULL);
3249 log->l_freelist = ticket; 3249 log->l_freelist = ticket;
3250 } 3250 }
3251 log->l_tail = ticket; 3251 log->l_tail = ticket;
@@ -3463,7 +3463,7 @@ xlog_verify_iclog(xlog_t *log,
3463 s = LOG_LOCK(log); 3463 s = LOG_LOCK(log);
3464 icptr = log->l_iclog; 3464 icptr = log->l_iclog;
3465 for (i=0; i < log->l_iclog_bufs; i++) { 3465 for (i=0; i < log->l_iclog_bufs; i++) {
3466 if (icptr == 0) 3466 if (icptr == NULL)
3467 xlog_panic("xlog_verify_iclog: invalid ptr"); 3467 xlog_panic("xlog_verify_iclog: invalid ptr");
3468 icptr = icptr->ic_next; 3468 icptr = icptr->ic_next;
3469 } 3469 }
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index fddbb091a86f..8ae6e8e5f3db 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1366,7 +1366,7 @@ xlog_recover_add_to_cont_trans(
1366 int old_len; 1366 int old_len;
1367 1367
1368 item = trans->r_itemq; 1368 item = trans->r_itemq;
1369 if (item == 0) { 1369 if (item == NULL) {
1370 /* finish copying rest of trans header */ 1370 /* finish copying rest of trans header */
1371 xlog_recover_add_item(&trans->r_itemq); 1371 xlog_recover_add_item(&trans->r_itemq);
1372 ptr = (xfs_caddr_t) &trans->r_theader + 1372 ptr = (xfs_caddr_t) &trans->r_theader +
@@ -1412,7 +1412,7 @@ xlog_recover_add_to_trans(
1412 if (!len) 1412 if (!len)
1413 return 0; 1413 return 0;
1414 item = trans->r_itemq; 1414 item = trans->r_itemq;
1415 if (item == 0) { 1415 if (item == NULL) {
1416 ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC); 1416 ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
1417 if (len == sizeof(xfs_trans_header_t)) 1417 if (len == sizeof(xfs_trans_header_t))
1418 xlog_recover_add_item(&trans->r_itemq); 1418 xlog_recover_add_item(&trans->r_itemq);
@@ -1467,12 +1467,12 @@ xlog_recover_unlink_tid(
1467 xlog_recover_t *tp; 1467 xlog_recover_t *tp;
1468 int found = 0; 1468 int found = 0;
1469 1469
1470 ASSERT(trans != 0); 1470 ASSERT(trans != NULL);
1471 if (trans == *q) { 1471 if (trans == *q) {
1472 *q = (*q)->r_next; 1472 *q = (*q)->r_next;
1473 } else { 1473 } else {
1474 tp = *q; 1474 tp = *q;
1475 while (tp != 0) { 1475 while (tp) {
1476 if (tp->r_next == trans) { 1476 if (tp->r_next == trans) {
1477 found = 1; 1477 found = 1;
1478 break; 1478 break;
@@ -1495,7 +1495,7 @@ xlog_recover_insert_item_backq(
1495 xlog_recover_item_t **q, 1495 xlog_recover_item_t **q,
1496 xlog_recover_item_t *item) 1496 xlog_recover_item_t *item)
1497{ 1497{
1498 if (*q == 0) { 1498 if (*q == NULL) {
1499 item->ri_prev = item->ri_next = item; 1499 item->ri_prev = item->ri_next = item;
1500 *q = item; 1500 *q = item;
1501 } else { 1501 } else {
@@ -1899,7 +1899,7 @@ xlog_recover_do_reg_buffer(
1899 break; 1899 break;
1900 nbits = xfs_contig_bits(data_map, map_size, bit); 1900 nbits = xfs_contig_bits(data_map, map_size, bit);
1901 ASSERT(nbits > 0); 1901 ASSERT(nbits > 0);
1902 ASSERT(item->ri_buf[i].i_addr != 0); 1902 ASSERT(item->ri_buf[i].i_addr != NULL);
1903 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0); 1903 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1904 ASSERT(XFS_BUF_COUNT(bp) >= 1904 ASSERT(XFS_BUF_COUNT(bp) >=
1905 ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT)); 1905 ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 7deb9e3cbbd3..e0b358c1c533 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert(
206 */ 206 */
207 if (!_xfs_mru_cache_migrate(mru, now)) { 207 if (!_xfs_mru_cache_migrate(mru, now)) {
208 mru->time_zero = now; 208 mru->time_zero = now;
209 if (!mru->next_reap) 209 if (!mru->queued) {
210 mru->next_reap = mru->grp_count * mru->grp_time; 210 mru->queued = 1;
211 queue_delayed_work(xfs_mru_reap_wq, &mru->work,
212 mru->grp_count * mru->grp_time);
213 }
211 } else { 214 } else {
212 grp = (now - mru->time_zero) / mru->grp_time; 215 grp = (now - mru->time_zero) / mru->grp_time;
213 grp = (mru->lru_grp + grp) % mru->grp_count; 216 grp = (mru->lru_grp + grp) % mru->grp_count;
@@ -271,29 +274,26 @@ _xfs_mru_cache_reap(
271 struct work_struct *work) 274 struct work_struct *work)
272{ 275{
273 xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work); 276 xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work);
274 unsigned long now; 277 unsigned long now, next;
275 278
276 ASSERT(mru && mru->lists); 279 ASSERT(mru && mru->lists);
277 if (!mru || !mru->lists) 280 if (!mru || !mru->lists)
278 return; 281 return;
279 282
280 mutex_spinlock(&mru->lock); 283 mutex_spinlock(&mru->lock);
281 now = jiffies; 284 next = _xfs_mru_cache_migrate(mru, jiffies);
282 if (mru->reap_all || 285 _xfs_mru_cache_clear_reap_list(mru);
283 (mru->next_reap && time_after(now, mru->next_reap))) { 286
284 if (mru->reap_all) 287 mru->queued = next;
285 now += mru->grp_count * mru->grp_time * 2; 288 if ((mru->queued > 0)) {
286 mru->next_reap = _xfs_mru_cache_migrate(mru, now); 289 now = jiffies;
287 _xfs_mru_cache_clear_reap_list(mru); 290 if (next <= now)
291 next = 0;
292 else
293 next -= now;
294 queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
288 } 295 }
289 296
290 /*
291 * the process that triggered the reap_all is responsible
292 * for restating the periodic reap if it is required.
293 */
294 if (!mru->reap_all)
295 queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
296 mru->reap_all = 0;
297 mutex_spinunlock(&mru->lock, 0); 297 mutex_spinunlock(&mru->lock, 0);
298} 298}
299 299
@@ -352,7 +352,7 @@ xfs_mru_cache_create(
352 352
353 /* An extra list is needed to avoid reaping up to a grp_time early. */ 353 /* An extra list is needed to avoid reaping up to a grp_time early. */
354 mru->grp_count = grp_count + 1; 354 mru->grp_count = grp_count + 1;
355 mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); 355 mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
356 356
357 if (!mru->lists) { 357 if (!mru->lists) {
358 err = ENOMEM; 358 err = ENOMEM;
@@ -374,11 +374,6 @@ xfs_mru_cache_create(
374 mru->grp_time = grp_time; 374 mru->grp_time = grp_time;
375 mru->free_func = free_func; 375 mru->free_func = free_func;
376 376
377 /* start up the reaper event */
378 mru->next_reap = 0;
379 mru->reap_all = 0;
380 queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
381
382 *mrup = mru; 377 *mrup = mru;
383 378
384exit: 379exit:
@@ -394,35 +389,25 @@ exit:
394 * Call xfs_mru_cache_flush() to flush out all cached entries, calling their 389 * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
395 * free functions as they're deleted. When this function returns, the caller is 390 * free functions as they're deleted. When this function returns, the caller is
396 * guaranteed that all the free functions for all the elements have finished 391 * guaranteed that all the free functions for all the elements have finished
397 * executing. 392 * executing and the reaper is not running.
398 *
399 * While we are flushing, we stop the periodic reaper event from triggering.
400 * Normally, we want to restart this periodic event, but if we are shutting
401 * down the cache we do not want it restarted. hence the restart parameter
402 * where 0 = do not restart reaper and 1 = restart reaper.
403 */ 393 */
404void 394void
405xfs_mru_cache_flush( 395xfs_mru_cache_flush(
406 xfs_mru_cache_t *mru, 396 xfs_mru_cache_t *mru)
407 int restart)
408{ 397{
409 if (!mru || !mru->lists) 398 if (!mru || !mru->lists)
410 return; 399 return;
411 400
412 cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
413
414 mutex_spinlock(&mru->lock); 401 mutex_spinlock(&mru->lock);
415 mru->reap_all = 1; 402 if (mru->queued) {
416 mutex_spinunlock(&mru->lock, 0); 403 mutex_spinunlock(&mru->lock, 0);
404 cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
405 mutex_spinlock(&mru->lock);
406 }
417 407
418 queue_work(xfs_mru_reap_wq, &mru->work.work); 408 _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
419 flush_workqueue(xfs_mru_reap_wq); 409 _xfs_mru_cache_clear_reap_list(mru);
420 410
421 mutex_spinlock(&mru->lock);
422 WARN_ON_ONCE(mru->reap_all != 0);
423 mru->reap_all = 0;
424 if (restart)
425 queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
426 mutex_spinunlock(&mru->lock, 0); 411 mutex_spinunlock(&mru->lock, 0);
427} 412}
428 413
@@ -433,8 +418,7 @@ xfs_mru_cache_destroy(
433 if (!mru || !mru->lists) 418 if (!mru || !mru->lists)
434 return; 419 return;
435 420
436 /* we don't want the reaper to restart here */ 421 xfs_mru_cache_flush(mru);
437 xfs_mru_cache_flush(mru, 0);
438 422
439 kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists)); 423 kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
440 kmem_free(mru, sizeof(*mru)); 424 kmem_free(mru, sizeof(*mru));
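The xfs_mru_cache rework above replaces the always-armed reaper (next_reap/reap_all) with a queued flag: delayed work is only scheduled while entries exist, and each reap pass re-arms itself using the delay the migrate step reports, so an idle cache keeps no pending work. Below is a simplified, single-threaded sketch of that scheduling logic using simulated time; mru_insert, mru_reap and struct cache are invented stand-ins, not the XFS types or the kernel workqueue API.

#include <stdio.h>

struct cache {
        int           queued;    /* is a reap pass "scheduled"? */
        unsigned long next_fire; /* simulated time the reaper should run */
        int           entries;   /* items still cached */
        unsigned long grp_time;  /* delay between reap passes */
};

/* Arm the reaper only when nothing is pending yet. */
static void mru_insert(struct cache *c, unsigned long now)
{
        c->entries++;
        if (!c->queued) {
                c->queued = 1;
                c->next_fire = now + c->grp_time;
        }
}

/* Free one expired batch, then re-arm only if work remains. */
static void mru_reap(struct cache *c, unsigned long now)
{
        if (c->entries)
                c->entries--;

        c->queued = (c->entries > 0);
        if (c->queued)
                c->next_fire = now + c->grp_time;
}

int main(void)
{
        struct cache c = { .grp_time = 10 };
        unsigned long now = 0;

        mru_insert(&c, now);
        mru_insert(&c, now);            /* second insert does not re-arm */

        while (c.queued) {
                now = c.next_fire;      /* jump simulated time forward */
                mru_reap(&c, now);
                printf("reap at t=%lu, entries left=%d, queued=%d\n",
                       now, c.entries, c.queued);
        }
        return 0;
}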
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 624fd10ee8e5..dd58ea1bbebe 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache
32 unsigned int grp_time; /* Time period spanned by grps. */ 32 unsigned int grp_time; /* Time period spanned by grps. */
33 unsigned int lru_grp; /* Group containing time zero. */ 33 unsigned int lru_grp; /* Group containing time zero. */
34 unsigned long time_zero; /* Time first element was added. */ 34 unsigned long time_zero; /* Time first element was added. */
35 unsigned long next_reap; /* Time that the reaper should
36 next do something. */
37 unsigned int reap_all; /* if set, reap all lists */
38 xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */ 35 xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
39 struct delayed_work work; /* Workqueue data for reaping. */ 36 struct delayed_work work; /* Workqueue data for reaping. */
37 unsigned int queued; /* work has been queued */
40} xfs_mru_cache_t; 38} xfs_mru_cache_t;
41 39
42int xfs_mru_cache_init(void); 40int xfs_mru_cache_init(void);
@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void);
44int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms, 42int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
45 unsigned int grp_count, 43 unsigned int grp_count,
46 xfs_mru_cache_free_func_t free_func); 44 xfs_mru_cache_free_func_t free_func);
47void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart); 45void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
48void xfs_mru_cache_destroy(struct xfs_mru_cache *mru); 46void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
49int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key, 47int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
50 void *value); 48 void *value);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 1a5ad8cd97b0..603459229904 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1082,6 +1082,9 @@ xfs_fsync(
1082 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 1082 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
1083 return XFS_ERROR(EIO); 1083 return XFS_ERROR(EIO);
1084 1084
1085 if (flag & FSYNC_DATA)
1086 filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
1087
1085 /* 1088 /*
1086 * We always need to make sure that the required inode state 1089 * We always need to make sure that the required inode state
1087 * is safe on disk. The vnode might be clean but because 1090 * is safe on disk. The vnode might be clean but because
@@ -3769,12 +3772,16 @@ xfs_inode_flush(
3769 sync_lsn = log->l_last_sync_lsn; 3772 sync_lsn = log->l_last_sync_lsn;
3770 GRANT_UNLOCK(log, s); 3773 GRANT_UNLOCK(log, s);
3771 3774
3772 if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0)) 3775 if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) {
3773 return 0; 3776 if (flags & FLUSH_SYNC)
3777 log_flags |= XFS_LOG_SYNC;
3778 error = xfs_log_force(mp, iip->ili_last_lsn, log_flags);
3779 if (error)
3780 return error;
3781 }
3774 3782
3775 if (flags & FLUSH_SYNC) 3783 if (ip->i_update_core == 0)
3776 log_flags |= XFS_LOG_SYNC; 3784 return 0;
3777 return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
3778 } 3785 }
3779 } 3786 }
3780 3787
@@ -3788,9 +3795,6 @@ xfs_inode_flush(
3788 if (flags & FLUSH_INODE) { 3795 if (flags & FLUSH_INODE) {
3789 int flush_flags; 3796 int flush_flags;
3790 3797
3791 if (xfs_ipincount(ip))
3792 return EAGAIN;
3793
3794 if (flags & FLUSH_SYNC) { 3798 if (flags & FLUSH_SYNC) {
3795 xfs_ilock(ip, XFS_ILOCK_SHARED); 3799 xfs_ilock(ip, XFS_ILOCK_SHARED);
3796 xfs_iflock(ip); 3800 xfs_iflock(ip);