Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_addr.c | 2
-rw-r--r--  fs/Kconfig | 27
-rw-r--r--  fs/adfs/super.c | 1
-rw-r--r--  fs/afs/dir.c | 2
-rw-r--r--  fs/afs/flock.c | 1
-rw-r--r--  fs/afs/mntpt.c | 1
-rw-r--r--  fs/afs/super.c | 1
-rw-r--r--  fs/aio.c | 24
-rw-r--r--  fs/autofs4/dev-ioctl.c | 1
-rw-r--r--  fs/bfs/dir.c | 1
-rw-r--r--  fs/bfs/file.c | 1
-rw-r--r--  fs/binfmt_elf.c | 9
-rw-r--r--  fs/binfmt_flat.c | 17
-rw-r--r--  fs/bio-integrity.c | 170
-rw-r--r--  fs/bio.c | 33
-rw-r--r--  fs/block_dev.c | 10
-rw-r--r--  fs/btrfs/async-thread.c | 6
-rw-r--r--  fs/btrfs/compression.c | 1
-rw-r--r--  fs/btrfs/ctree.c | 121
-rw-r--r--  fs/btrfs/ctree.h | 30
-rw-r--r--  fs/btrfs/disk-io.c | 15
-rw-r--r--  fs/btrfs/extent-tree.c | 1096
-rw-r--r--  fs/btrfs/file.c | 6
-rw-r--r--  fs/btrfs/free-space-cache.c | 1058
-rw-r--r--  fs/btrfs/free-space-cache.h | 8
-rw-r--r--  fs/btrfs/inode.c | 31
-rw-r--r--  fs/btrfs/ioctl.c | 7
-rw-r--r--  fs/btrfs/print-tree.c | 6
-rw-r--r--  fs/btrfs/relocation.c | 17
-rw-r--r--  fs/btrfs/super.c | 1
-rw-r--r--  fs/btrfs/transaction.c | 60
-rw-r--r--  fs/btrfs/transaction.h | 1
-rw-r--r--  fs/btrfs/tree-log.c | 2
-rw-r--r--  fs/btrfs/volumes.c | 46
-rw-r--r--  fs/btrfs/zlib.c | 6
-rw-r--r--  fs/char_dev.c | 1
-rw-r--r--  fs/cifs/CHANGES | 13
-rw-r--r--  fs/cifs/README | 25
-rw-r--r--  fs/cifs/asn1.c | 55
-rw-r--r--  fs/cifs/cifs_debug.c | 8
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 12
-rw-r--r--  fs/cifs/cifs_spnego.c | 9
-rw-r--r--  fs/cifs/cifs_unicode.c | 2
-rw-r--r--  fs/cifs/cifsacl.c | 26
-rw-r--r--  fs/cifs/cifsfs.c | 162
-rw-r--r--  fs/cifs/cifsfs.h | 15
-rw-r--r--  fs/cifs/cifsglob.h | 32
-rw-r--r--  fs/cifs/cifspdu.h | 14
-rw-r--r--  fs/cifs/cifsproto.h | 23
-rw-r--r--  fs/cifs/cifssmb.c | 152
-rw-r--r--  fs/cifs/connect.c | 103
-rw-r--r--  fs/cifs/dir.c | 52
-rw-r--r--  fs/cifs/dns_resolve.c | 25
-rw-r--r--  fs/cifs/file.c | 40
-rw-r--r--  fs/cifs/inode.c | 780
-rw-r--r--  fs/cifs/link.c | 3
-rw-r--r--  fs/cifs/netmisc.c | 56
-rw-r--r--  fs/cifs/readdir.c | 505
-rw-r--r--  fs/cifs/sess.c | 2
-rw-r--r--  fs/cifs/xattr.c | 12
-rw-r--r--  fs/compat.c | 5
-rw-r--r--  fs/compat_ioctl.c | 2
-rw-r--r--  fs/dlm/lock.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 4
-rw-r--r--  fs/dlm/plock.c | 17
-rw-r--r--  fs/ecryptfs/keystore.c | 13
-rw-r--r--  fs/eventfd.c | 122
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/exofs/common.h | 4
-rw-r--r--  fs/exofs/dir.c | 4
-rw-r--r--  fs/exofs/exofs.h | 7
-rw-r--r--  fs/exofs/file.c | 21
-rw-r--r--  fs/exofs/inode.c | 7
-rw-r--r--  fs/exofs/namei.c | 4
-rw-r--r--  fs/exofs/osd.c | 4
-rw-r--r--  fs/exofs/super.c | 7
-rw-r--r--  fs/exofs/symlink.c | 4
-rw-r--r--  fs/ext2/ioctl.c | 1
-rw-r--r--  fs/ext2/namei.c | 12
-rw-r--r--  fs/ext3/dir.c | 3
-rw-r--r--  fs/ext3/inode.c | 32
-rw-r--r--  fs/ext4/ext4.h | 14
-rw-r--r--  fs/ext4/ext4_jbd2.c | 4
-rw-r--r--  fs/ext4/ext4_jbd2.h | 6
-rw-r--r--  fs/ext4/extents.c | 1
-rw-r--r--  fs/ext4/ialloc.c | 2
-rw-r--r--  fs/ext4/inode.c | 384
-rw-r--r--  fs/ext4/ioctl.c | 21
-rw-r--r--  fs/ext4/mballoc.c | 50
-rw-r--r--  fs/fat/dir.c | 1
-rw-r--r--  fs/fat/file.c | 2
-rw-r--r--  fs/fat/namei_msdos.c | 1
-rw-r--r--  fs/fat/namei_vfat.c | 1
-rw-r--r--  fs/fcntl.c | 1
-rw-r--r--  fs/freevxfs/vxfs_super.c | 1
-rw-r--r--  fs/fuse/dev.c | 91
-rw-r--r--  fs/fuse/dir.c | 57
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/fuse/fuse_i.h | 27
-rw-r--r--  fs/fuse/inode.c | 68
-rw-r--r--  fs/gfs2/aops.c | 39
-rw-r--r--  fs/gfs2/glock.c | 138
-rw-r--r--  fs/gfs2/glock.h | 3
-rw-r--r--  fs/gfs2/glops.c | 21
-rw-r--r--  fs/gfs2/incore.h | 2
-rw-r--r--  fs/gfs2/rgrp.c | 23
-rw-r--r--  fs/gfs2/super.c | 40
-rw-r--r--  fs/gfs2/super.h | 4
-rw-r--r--  fs/gfs2/trace_gfs2.h | 8
-rw-r--r--  fs/hfs/super.c | 1
-rw-r--r--  fs/hfsplus/super.c | 1
-rw-r--r--  fs/hostfs/hostfs_kern.c | 1
-rw-r--r--  fs/hpfs/dir.c | 1
-rw-r--r--  fs/hpfs/file.c | 1
-rw-r--r--  fs/hpfs/hpfs_fn.h | 1
-rw-r--r--  fs/hpfs/inode.c | 1
-rw-r--r--  fs/hpfs/namei.c | 1
-rw-r--r--  fs/inode.c | 40
-rw-r--r--  fs/isofs/inode.c | 4
-rw-r--r--  fs/jbd/journal.c | 26
-rw-r--r--  fs/jbd/transaction.c | 68
-rw-r--r--  fs/jbd2/journal.c | 31
-rw-r--r--  fs/jbd2/transaction.c | 68
-rw-r--r--  fs/jffs2/erase.c | 10
-rw-r--r--  fs/jffs2/file.c | 2
-rw-r--r--  fs/jffs2/scan.c | 4
-rw-r--r--  fs/jffs2/super.c | 1
-rw-r--r--  fs/jfs/acl.c | 4
-rw-r--r--  fs/lockd/clntproc.c | 1
-rw-r--r--  fs/lockd/svc4proc.c | 1
-rw-r--r--  fs/lockd/svcproc.c | 1
-rw-r--r--  fs/namei.c | 7
-rw-r--r--  fs/namespace.c | 4
-rw-r--r--  fs/nfs/client.c | 18
-rw-r--r--  fs/nfs/delegation.c | 1
-rw-r--r--  fs/nfs/dir.c | 3
-rw-r--r--  fs/nfs/file.c | 1
-rw-r--r--  fs/nfs/getroot.c | 1
-rw-r--r--  fs/nfs/inode.c | 1
-rw-r--r--  fs/nfs/nfs4_fs.h | 6
-rw-r--r--  fs/nfs/nfs4proc.c | 41
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/read.c | 1
-rw-r--r--  fs/nfs/write.c | 8
-rw-r--r--  fs/nfsd/nfsctl.c | 1
-rw-r--r--  fs/nfsd/nfssvc.c | 1
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/nilfs2/Kconfig | 25
-rw-r--r--  fs/nilfs2/bmap.c | 5
-rw-r--r--  fs/nilfs2/cpfile.c | 5
-rw-r--r--  fs/nilfs2/dat.c | 9
-rw-r--r--  fs/nilfs2/dir.c | 1
-rw-r--r--  fs/nilfs2/mdt.c | 4
-rw-r--r--  fs/nilfs2/segment.c | 44
-rw-r--r--  fs/notify/Kconfig | 12
-rw-r--r--  fs/notify/dnotify/Kconfig | 2
-rw-r--r--  fs/notify/fsnotify.c | 4
-rw-r--r--  fs/notify/inotify/Kconfig | 2
-rw-r--r--  fs/notify/inotify/inotify_user.c | 112
-rw-r--r--  fs/notify/notification.c | 19
-rw-r--r--  fs/ocfs2/ioctl.c | 1
-rw-r--r--  fs/partitions/check.c | 2
-rw-r--r--  fs/pipe.c | 4
-rw-r--r--  fs/quota/dquot.c | 9
-rw-r--r--  fs/ramfs/file-nommu.c | 1
-rw-r--r--  fs/reiserfs/journal.c | 2
-rw-r--r--  fs/reiserfs/super.c | 1
-rw-r--r--  fs/reiserfs/xattr.c | 1
-rw-r--r--  fs/squashfs/super.c | 1
-rw-r--r--  fs/sync.c | 5
-rw-r--r--  fs/sysfs/bin.c | 1
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/ubifs/io.c | 57
-rw-r--r--  fs/ubifs/ioctl.c | 1
-rw-r--r--  fs/ubifs/recovery.c | 57
-rw-r--r--  fs/ubifs/replay.c | 9
-rw-r--r--  fs/ubifs/scan.c | 20
-rw-r--r--  fs/ubifs/super.c | 14
-rw-r--r--  fs/ubifs/ubifs.h | 11
-rw-r--r--  fs/udf/super.c | 12
-rw-r--r--  fs/xfs/linux-2.6/kmem.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 4
-rw-r--r--  fs/xfs/xfs_iget.c | 142
-rw-r--r--  fs/xfs/xfs_inode.h | 17
187 files changed, 4452 insertions(+), 2786 deletions(-)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 6fcb1e7095c..92828281a30 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -57,7 +57,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
 	buffer = kmap(page);
 	offset = page_offset(page);
 
-	retval = v9fs_file_readn(filp, buffer, NULL, offset, PAGE_CACHE_SIZE);
+	retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
 	if (retval < 0)
 		goto done;
 
diff --git a/fs/Kconfig b/fs/Kconfig
index a97263be6a9..0e7da7bb5d9 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -186,32 +186,7 @@ source "fs/romfs/Kconfig"
 source "fs/sysv/Kconfig"
 source "fs/ufs/Kconfig"
 source "fs/exofs/Kconfig"
-
-config NILFS2_FS
-	tristate "NILFS2 file system support (EXPERIMENTAL)"
-	depends on BLOCK && EXPERIMENTAL
-	select CRC32
-	help
-	  NILFS2 is a log-structured file system (LFS) supporting continuous
-	  snapshotting. In addition to versioning capability of the entire
-	  file system, users can even restore files mistakenly overwritten or
-	  destroyed just a few seconds ago. Since this file system can keep
-	  consistency like conventional LFS, it achieves quick recovery after
-	  system crashes.
-
-	  NILFS2 creates a number of checkpoints every few seconds or per
-	  synchronous write basis (unless there is no change). Users can
-	  select significant versions among continuously created checkpoints,
-	  and can change them into snapshots which will be preserved for long
-	  periods until they are changed back to checkpoints. Each
-	  snapshot is mountable as a read-only file system concurrently with
-	  its writable mount, and this feature is convenient for online backup.
-
-	  Some features including atime, extended attributes, and POSIX ACLs,
-	  are not supported yet.
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called nilfs2. If unsure, say N.
+source "fs/nilfs2/Kconfig"
 
 endif # MISC_FILESYSTEMS
 
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index aad92f0a104..6910a98bd73 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -13,6 +13,7 @@
 #include <linux/parser.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/statfs.h>
 #include "adfs.h"
 #include "dir_f.h"
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 9bd757774c9..88067f36e5e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -564,7 +564,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
 	struct afs_vnode *vnode, *dir;
-	struct afs_fid fid;
+	struct afs_fid uninitialized_var(fid);
 	struct dentry *parent;
 	struct key *key;
 	void *dir_version;
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 210acafe4a9..3ff8bdd18fb 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -432,7 +432,6 @@ vfs_rejected_lock:
 	list_del_init(&fl->fl_u.afs.link);
 	if (list_empty(&vnode->granted_locks))
 		afs_defer_unlock(vnode, key);
-	spin_unlock(&vnode->lock);
 	goto abort_attempt;
 }
 
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index c52be53f694..5ffb570cd3a 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -17,7 +17,6 @@
 #include <linux/pagemap.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
-#include <linux/mnt_namespace.h>
 #include "internal.h"
 
 
diff --git a/fs/afs/super.c b/fs/afs/super.c
index ad0514d0115..e1ea1c240b6 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 #include <linux/parser.h>
diff --git a/fs/aio.c b/fs/aio.c
index 76da1253795..d065b2c3273 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
 	assert_spin_locked(&ctx->ctx_lock);
 
+	if (req->ki_eventfd != NULL)
+		eventfd_ctx_put(req->ki_eventfd);
 	if (req->ki_dtor)
 		req->ki_dtor(req);
 	if (req->ki_iovec != &req->ki_inline_vec)
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data)
 	/* Complete the fput(s) */
 	if (req->ki_filp != NULL)
 		__fput(req->ki_filp);
-	if (req->ki_eventfd != NULL)
-		__fput(req->ki_eventfd);
 
 	/* Link the iocb into the context's free list */
 	spin_lock_irq(&ctx->ctx_lock);
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data)
  */
 static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
-	int schedule_putreq = 0;
-
 	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
 		req, atomic_long_read(&req->ki_filp->f_count));
 
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	 * we would not be holding the last reference to the file*, so
 	 * this function will be executed w/out any aio kthread wakeup.
 	 */
-	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
-		schedule_putreq++;
-	else
-		req->ki_filp = NULL;
-	if (req->ki_eventfd != NULL) {
-		if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
-			schedule_putreq++;
-		else
-			req->ki_eventfd = NULL;
-	}
-	if (unlikely(schedule_putreq)) {
+	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
 		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
 		spin_unlock(&fput_lock);
 		queue_work(aio_wq, &fput_work);
-	} else
+	} else {
+		req->ki_filp = NULL;
 		really_put_req(ctx, req);
+	}
 	return 1;
 }
 
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	 * an eventfd() fd, and will be signaled for each completed
 	 * event using the eventfd_signal() function.
 	 */
-	req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+	req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
 	if (IS_ERR(req->ki_eventfd)) {
 		ret = PTR_ERR(req->ki_eventfd);
 		req->ki_eventfd = NULL;
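
The hunks above move fs/aio.c from holding a struct file reference on the eventfd (dropped via a deferred __fput() on a workqueue) to holding a struct eventfd_ctx reference. A minimal lifecycle sketch, not part of the commit, assuming only the eventfd_ctx_fdget()/eventfd_ctx_put() calls visible in this diff and the eventfd_signal() mentioned in the hunk comment:

	/* sketch: resolve, signal, release -- no deferred __fput() needed */
	static int example_notify(int resfd)
	{
		struct eventfd_ctx *ctx;

		ctx = eventfd_ctx_fdget(resfd);	/* fd -> refcounted context */
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		eventfd_signal(ctx, 1);		/* post one completion event */
		eventfd_ctx_put(ctx);		/* plain refcount drop */
		return 0;
	}
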
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index f3da2eb51f5..00bf8fcb245 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -19,7 +19,6 @@
 #include <linux/sched.h>
 #include <linux/compat.h>
 #include <linux/syscalls.h>
-#include <linux/smp_lock.h>
 #include <linux/magic.h>
 #include <linux/dcache.h>
 #include <linux/uaccess.h>
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 54bd07d44e6..1e41aadb106 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -8,7 +8,6 @@
 #include <linux/time.h>
 #include <linux/string.h>
 #include <linux/fs.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/sched.h>
 #include "bfs.h"
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index 6a021265f01..88b9a3ff44e 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -11,7 +11,6 @@
 
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
 #include "bfs.h"
 
 #undef DEBUG
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 9fa212b014a..b7c1603cd4b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1522,11 +1522,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	info->thread = NULL;
 
 	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
-	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
-
 	if (psinfo == NULL)
 		return 0;
 
+	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+
 	/*
 	 * Figure out how many notes we're going to need for each thread.
 	 */
@@ -1929,7 +1929,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
 	if (!elf)
 		goto out;
-
+	/*
+	 * The number of segs are recored into ELF header as 16bit value.
+	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
+	 */
 	segs = current->mm->map_count;
 #ifdef ELF_CORE_EXTRA_PHDRS
 	segs += ELF_CORE_EXTRA_PHDRS;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 697f6b5f131..e92f229e3c6 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -828,15 +828,22 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
 	if (IS_ERR(bprm.file))
 		return res;
 
+	bprm.cred = prepare_exec_creds();
+	res = -ENOMEM;
+	if (!bprm.cred)
+		goto out;
+
 	res = prepare_binprm(&bprm);
 
 	if (res <= (unsigned long)-4096)
 		res = load_flat_file(&bprm, libs, id, NULL);
-	if (bprm.file) {
-		allow_write_access(bprm.file);
-		fput(bprm.file);
-		bprm.file = NULL;
-	}
+
+	abort_creds(bprm.cred);
+
+out:
+	allow_write_access(bprm.file);
+	fput(bprm.file);
+
 	return(res);
 }
 
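
The hunk above brackets load_flat_file() with exec credentials. A schematic of the pattern it introduces (an assumption-labeled sketch, not kernel source: it shows only the pairing, not the surrounding binprm handling):

	/* sketch: prepare_exec_creds() must be balanced by abort_creds()
	 * unless a successful exec commits the creds, which this shared
	 * library loader never does. */
	struct cred *cred;

	cred = prepare_exec_creds();	/* may fail under memory pressure */
	if (!cred)
		return -ENOMEM;
	/* ... prepare_binprm()/load_flat_file() read bprm.cred ... */
	abort_creds(cred);		/* balanced release on all paths */
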
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 31c46a241ba..49a34e7f730 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -1,7 +1,7 @@
 /*
  * bio-integrity.c - bio data integrity extensions
  *
- * Copyright (C) 2007, 2008 Oracle Corporation
+ * Copyright (C) 2007, 2008, 2009 Oracle Corporation
  * Written by: Martin K. Petersen <martin.petersen@oracle.com>
  *
  * This program is free software; you can redistribute it and/or
@@ -25,63 +25,121 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 
-static struct kmem_cache *bio_integrity_slab __read_mostly;
-static mempool_t *bio_integrity_pool;
-static struct bio_set *integrity_bio_set;
+struct integrity_slab {
+	struct kmem_cache *slab;
+	unsigned short nr_vecs;
+	char name[8];
+};
+
+#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
+struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
+	IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
+};
+#undef IS
+
 static struct workqueue_struct *kintegrityd_wq;
 
+static inline unsigned int vecs_to_idx(unsigned int nr)
+{
+	switch (nr) {
+	case 1:
+		return 0;
+	case 2 ... 4:
+		return 1;
+	case 5 ... 16:
+		return 2;
+	case 17 ... 64:
+		return 3;
+	case 65 ... 128:
+		return 4;
+	case 129 ... BIO_MAX_PAGES:
+		return 5;
+	default:
+		BUG();
+	}
+}
+
+static inline int use_bip_pool(unsigned int idx)
+{
+	if (idx == BIOVEC_NR_POOLS)
+		return 1;
+
+	return 0;
+}
+
 /**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
  * @bio: bio to attach integrity metadata to
  * @gfp_mask: Memory allocation mask
  * @nr_vecs: Number of integrity metadata scatter-gather elements
+ * @bs: bio_set to allocate from
  *
  * Description: This function prepares a bio for attaching integrity
  * metadata. nr_vecs specifies the maximum number of pages containing
  * integrity metadata that can be attached.
  */
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
-						  gfp_t gfp_mask,
-						  unsigned int nr_vecs)
+struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
+							 gfp_t gfp_mask,
+							 unsigned int nr_vecs,
+							 struct bio_set *bs)
 {
 	struct bio_integrity_payload *bip;
-	struct bio_vec *iv;
-	unsigned long idx;
+	unsigned int idx = vecs_to_idx(nr_vecs);
 
 	BUG_ON(bio == NULL);
+	bip = NULL;
 
-	bip = mempool_alloc(bio_integrity_pool, gfp_mask);
-	if (unlikely(bip == NULL)) {
-		printk(KERN_ERR "%s: could not alloc bip\n", __func__);
-		return NULL;
-	}
+	/* Lower order allocations come straight from slab */
+	if (!use_bip_pool(idx))
+		bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);
 
-	memset(bip, 0, sizeof(*bip));
+	/* Use mempool if lower order alloc failed or max vecs were requested */
+	if (bip == NULL) {
+		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
 
-	iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set);
-	if (unlikely(iv == NULL)) {
-		printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__);
-		mempool_free(bip, bio_integrity_pool);
-		return NULL;
+		if (unlikely(bip == NULL)) {
+			printk(KERN_ERR "%s: could not alloc bip\n", __func__);
+			return NULL;
+		}
 	}
 
-	bip->bip_pool = idx;
-	bip->bip_vec = iv;
+	memset(bip, 0, sizeof(*bip));
+
+	bip->bip_slab = idx;
 	bip->bip_bio = bio;
 	bio->bi_integrity = bip;
 
 	return bip;
 }
+EXPORT_SYMBOL(bio_integrity_alloc_bioset);
+
+/**
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * @bio: bio to attach integrity metadata to
+ * @gfp_mask: Memory allocation mask
+ * @nr_vecs: Number of integrity metadata scatter-gather elements
+ *
+ * Description: This function prepares a bio for attaching integrity
+ * metadata. nr_vecs specifies the maximum number of pages containing
+ * integrity metadata that can be attached.
+ */
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+						  gfp_t gfp_mask,
+						  unsigned int nr_vecs)
+{
+	return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
+}
 EXPORT_SYMBOL(bio_integrity_alloc);
 
 /**
  * bio_integrity_free - Free bio integrity payload
  * @bio: bio containing bip to be freed
+ * @bs: bio_set this bio was allocated from
  *
  * Description: Used to free the integrity portion of a bio. Usually
  * called from bio_free().
  */
-void bio_integrity_free(struct bio *bio)
+void bio_integrity_free(struct bio *bio, struct bio_set *bs)
 {
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 
@@ -92,8 +150,10 @@ void bio_integrity_free(struct bio *bio)
 	    && bip->bip_buf != NULL)
 		kfree(bip->bip_buf);
 
-	bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool);
-	mempool_free(bip, bio_integrity_pool);
+	if (use_bip_pool(bip->bip_slab))
+		mempool_free(bip, bs->bio_integrity_pool);
+	else
+		kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);
 
 	bio->bi_integrity = NULL;
 }
@@ -114,7 +174,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 	struct bio_vec *iv;
 
-	if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_pool)) {
+	if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
 		printk(KERN_ERR "%s: bip_vec full\n", __func__);
 		return 0;
 	}
@@ -647,8 +707,8 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
 	bp->iv1 = bip->bip_vec[0];
 	bp->iv2 = bip->bip_vec[0];
 
-	bp->bip1.bip_vec = &bp->iv1;
-	bp->bip2.bip_vec = &bp->iv2;
+	bp->bip1.bip_vec[0] = bp->iv1;
+	bp->bip2.bip_vec[0] = bp->iv2;
 
 	bp->iv1.bv_len = sectors * bi->tuple_size;
 	bp->iv2.bv_offset += sectors * bi->tuple_size;
@@ -667,17 +727,19 @@ EXPORT_SYMBOL(bio_integrity_split);
  * @bio: New bio
  * @bio_src: Original bio
  * @gfp_mask: Memory allocation mask
+ * @bs: bio_set to allocate bip from
  *
  * Description: Called to allocate a bip when cloning a bio
  */
-int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+			gfp_t gfp_mask, struct bio_set *bs)
 {
 	struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
 	struct bio_integrity_payload *bip;
 
 	BUG_ON(bip_src == NULL);
 
-	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+	bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
 
 	if (bip == NULL)
 		return -EIO;
@@ -693,25 +755,43 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(bio_integrity_clone);
 
-static int __init bio_integrity_init(void)
+int bioset_integrity_create(struct bio_set *bs, int pool_size)
 {
-	kintegrityd_wq = create_workqueue("kintegrityd");
+	unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
+
+	bs->bio_integrity_pool =
+		mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
 
+	if (!bs->bio_integrity_pool)
+		return -1;
+
+	return 0;
+}
+EXPORT_SYMBOL(bioset_integrity_create);
+
+void bioset_integrity_free(struct bio_set *bs)
+{
+	if (bs->bio_integrity_pool)
+		mempool_destroy(bs->bio_integrity_pool);
+}
+EXPORT_SYMBOL(bioset_integrity_free);
+
+void __init bio_integrity_init(void)
+{
+	unsigned int i;
+
+	kintegrityd_wq = create_workqueue("kintegrityd");
 	if (!kintegrityd_wq)
 		panic("Failed to create kintegrityd\n");
 
-	bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
-					SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
+		unsigned int size;
 
-	bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE,
-						      bio_integrity_slab);
-	if (!bio_integrity_pool)
-		panic("bio_integrity: can't allocate bip pool\n");
+		size = sizeof(struct bio_integrity_payload)
+			+ bip_slab[i].nr_vecs * sizeof(struct bio_vec);
 
-	integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0);
-	if (!integrity_bio_set)
-		panic("bio_integrity: can't allocate bio_set\n");
-
-	return 0;
+		bip_slab[i].slab =
+			kmem_cache_create(bip_slab[i].name, size, 0,
+					  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	}
 }
-subsys_initcall(bio_integrity_init);
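
Taken together, the hunks above replace the single global bip mempool with per-bio_set pools backed by size-binned slabs. A hypothetical caller sketch under those assumptions (signatures from the diff; the driver-local bio_set `bs`, `bio` and `nr_vecs` are illustrative only):

	static int example_setup(struct bio *bio, unsigned int nr_vecs)
	{
		struct bio_set *bs;
		struct bio_integrity_payload *bip;

		/* per the bio.c hunks below, bioset_create() now also
		 * calls bioset_integrity_create() for its pool */
		bs = bioset_create(BIO_POOL_SIZE, 0);
		if (!bs)
			return -ENOMEM;

		bip = bio_integrity_alloc_bioset(bio, GFP_NOIO, nr_vecs, bs);
		if (!bip)
			return -EIO;

		/* ... attach integrity pages, submit bio ... */

		bio_integrity_free(bio, bs);	/* must name the same bio_set */
		bioset_free(bs);		/* also frees the integrity pool */
		return 0;
	}
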
diff --git a/fs/bio.c b/fs/bio.c
index 24c91404353..76738005c8e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -238,7 +238,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 	if (bio_integrity(bio))
-		bio_integrity_free(bio);
+		bio_integrity_free(bio, bs);
 
 	/*
 	 * If we have front padding, adjust the bio pointer before freeing
@@ -341,7 +341,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 static void bio_kmalloc_destructor(struct bio *bio)
 {
 	if (bio_integrity(bio))
-		bio_integrity_free(bio);
+		bio_integrity_free(bio, fs_bio_set);
 	kfree(bio);
 }
 
@@ -472,7 +472,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 	if (bio_integrity(bio)) {
 		int ret;
 
-		ret = bio_integrity_clone(b, bio, gfp_mask);
+		ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
 
 		if (ret < 0) {
 			bio_put(b);
@@ -705,14 +705,13 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 }
 
 static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-			  struct sg_iovec *iov, int iov_count, int uncopy,
-			  int do_free_page)
+			  struct sg_iovec *iov, int iov_count,
+			  int to_user, int from_user, int do_free_page)
 {
 	int ret = 0, i;
 	struct bio_vec *bvec;
 	int iov_idx = 0;
 	unsigned int iov_off = 0;
-	int read = bio_data_dir(bio) == READ;
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		char *bv_addr = page_address(bvec->bv_page);
@@ -727,13 +726,14 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 			iov_addr = iov[iov_idx].iov_base + iov_off;
 
 			if (!ret) {
-				if (!read && !uncopy)
-					ret = copy_from_user(bv_addr, iov_addr,
-							     bytes);
-				if (read && uncopy)
+				if (to_user)
 					ret = copy_to_user(iov_addr, bv_addr,
 							   bytes);
 
+				if (from_user)
+					ret = copy_from_user(bv_addr, iov_addr,
+							     bytes);
+
 				if (ret)
 					ret = -EFAULT;
 			}
@@ -770,7 +770,8 @@ int bio_uncopy_user(struct bio *bio)
 
 	if (!bio_flagged(bio, BIO_NULL_MAPPED))
 		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-				     bmd->nr_sgvecs, 1, bmd->is_our_pages);
+				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+				     0, bmd->is_our_pages);
 	bio_free_map_data(bmd);
 	bio_put(bio);
 	return ret;
@@ -875,8 +876,9 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	/*
 	 * success
 	 */
-	if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
-		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
+	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
+	    (map_data && map_data->from_user)) {
+		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
 		if (ret)
 			goto cleanup;
 	}
@@ -1539,6 +1541,7 @@ void bioset_free(struct bio_set *bs)
 	if (bs->bio_pool)
 		mempool_destroy(bs->bio_pool);
 
+	bioset_integrity_free(bs);
 	biovec_free_pools(bs);
 	bio_put_slab(bs);
 
@@ -1579,6 +1582,9 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 	if (!bs->bio_pool)
 		goto bad;
 
+	if (bioset_integrity_create(bs, pool_size))
+		goto bad;
+
 	if (!biovec_create_pools(bs, pool_size))
 		return bs;
 
@@ -1616,6 +1622,7 @@ static int __init init_bio(void)
 	if (!bio_slabs)
 		panic("bio: can't allocate bios\n");
 
+	bio_integrity_init();
 	biovec_init_slabs();
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
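
The __bio_copy_iov() rework above makes the copy direction explicit instead of inferring it from bio_data_dir() inside the helper. The two call sites from this diff, restated side by side as a sketch:

	/* bio_uncopy_user(): copy bounce pages back out only for READs */
	__bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs,
		       bio_data_dir(bio) == READ,	/* to_user */
		       0,				/* from_user */
		       bmd->is_our_pages);

	/* bio_copy_user_iov(): seed the bounce pages from userspace up front */
	__bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count,
		       0,				/* to_user */
		       1,				/* from_user */
		       0);
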
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3a6d4fb2a32..94dfda24c06 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -564,6 +564,16 @@ struct block_device *bdget(dev_t dev)
 
 EXPORT_SYMBOL(bdget);
 
+/**
+ * bdgrab -- Grab a reference to an already referenced block device
+ * @bdev:	Block device to grab a reference to.
+ */
+struct block_device *bdgrab(struct block_device *bdev)
+{
+	atomic_inc(&bdev->bd_inode->i_count);
+	return bdev;
+}
+
 long nr_blockdev_pages(void)
 {
 	struct block_device *bdev;
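
bdgrab() above is deliberately lighter than bdget(): it only bumps the backing inode's reference count, so it cannot sleep, but it is only valid on a block device the caller already holds a reference to. A hypothetical usage sketch (release via the existing bdput()):

	struct block_device *bd;

	bd = bdgrab(bdev);	/* bdev must already be referenced */
	/* ... hand bd off to another context ... */
	bdput(bd);		/* drop it with the usual bdput() */
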
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7f88628a1a7..019e8af449a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -299,8 +299,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
299 "btrfs-%s-%d", workers->name, 299 "btrfs-%s-%d", workers->name,
300 workers->num_workers + i); 300 workers->num_workers + i);
301 if (IS_ERR(worker->task)) { 301 if (IS_ERR(worker->task)) {
302 kfree(worker);
303 ret = PTR_ERR(worker->task); 302 ret = PTR_ERR(worker->task);
303 kfree(worker);
304 goto fail; 304 goto fail;
305 } 305 }
306 306
@@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work)
424 * list 424 * list
425 */ 425 */
426 if (worker->idle) { 426 if (worker->idle) {
427 spin_lock_irqsave(&worker->workers->lock, flags); 427 spin_lock(&worker->workers->lock);
428 worker->idle = 0; 428 worker->idle = 0;
429 list_move_tail(&worker->worker_list, 429 list_move_tail(&worker->worker_list,
430 &worker->workers->worker_list); 430 &worker->workers->worker_list);
431 spin_unlock_irqrestore(&worker->workers->lock, flags); 431 spin_unlock(&worker->workers->lock);
432 } 432 }
433 if (!worker->working) { 433 if (!worker->working) {
434 wake = 1; 434 wake = 1;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index de1e2fd3208..9d8ba4d54a3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -26,7 +26,6 @@
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/smp_lock.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
 #include <linux/swap.h>
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 60a45f3a4e9..3fdcc0512d3 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -557,19 +557,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
 
 	btrfs_disk_key_to_cpu(&k1, disk);
 
-	if (k1.objectid > k2->objectid)
-		return 1;
-	if (k1.objectid < k2->objectid)
-		return -1;
-	if (k1.type > k2->type)
-		return 1;
-	if (k1.type < k2->type)
-		return -1;
-	if (k1.offset > k2->offset)
-		return 1;
-	if (k1.offset < k2->offset)
-		return -1;
-	return 0;
+	return btrfs_comp_cpu_keys(&k1, k2);
 }
 
 /*
@@ -1052,9 +1040,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
 		return 0;
 
-	if (btrfs_header_nritems(mid) > 2)
-		return 0;
-
 	if (btrfs_header_nritems(mid) < 2)
 		err_on_enospc = 1;
 
@@ -1701,6 +1686,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 	struct extent_buffer *b;
 	int slot;
 	int ret;
+	int err;
 	int level;
 	int lowest_unlock = 1;
 	u8 lowest_level = 0;
@@ -1737,8 +1723,6 @@ again:
 		p->locks[level] = 1;
 
 		if (cow) {
-			int wret;
-
 			/*
 			 * if we don't really need to cow this block
 			 * then we don't want to set the path blocking,
@@ -1749,12 +1733,12 @@ again:
 
 			btrfs_set_path_blocking(p);
 
-			wret = btrfs_cow_block(trans, root, b,
-					       p->nodes[level + 1],
-					       p->slots[level + 1], &b);
-			if (wret) {
+			err = btrfs_cow_block(trans, root, b,
+					      p->nodes[level + 1],
+					      p->slots[level + 1], &b);
+			if (err) {
 				free_extent_buffer(b);
-				ret = wret;
+				ret = err;
 				goto done;
 			}
 		}
@@ -1793,41 +1777,45 @@ cow_done:
 		ret = bin_search(b, key, level, &slot);
 
 		if (level != 0) {
-			if (ret && slot > 0)
+			int dec = 0;
+			if (ret && slot > 0) {
+				dec = 1;
 				slot -= 1;
+			}
 			p->slots[level] = slot;
-			ret = setup_nodes_for_search(trans, root, p, b, level,
+			err = setup_nodes_for_search(trans, root, p, b, level,
 						     ins_len);
-			if (ret == -EAGAIN)
+			if (err == -EAGAIN)
 				goto again;
-			else if (ret)
+			if (err) {
+				ret = err;
 				goto done;
+			}
 			b = p->nodes[level];
 			slot = p->slots[level];
 
 			unlock_up(p, level, lowest_unlock);
 
-			/* this is only true while dropping a snapshot */
 			if (level == lowest_level) {
-				ret = 0;
+				if (dec)
+					p->slots[level]++;
 				goto done;
 			}
 
-			ret = read_block_for_search(trans, root, p,
+			err = read_block_for_search(trans, root, p,
 						    &b, level, slot, key);
-			if (ret == -EAGAIN)
+			if (err == -EAGAIN)
 				goto again;
-
-			if (ret == -EIO)
+			if (err) {
+				ret = err;
 				goto done;
+			}
 
 			if (!p->skip_locking) {
-				int lret;
-
 				btrfs_clear_path_blocking(p, NULL);
-				lret = btrfs_try_spin_lock(b);
+				err = btrfs_try_spin_lock(b);
 
-				if (!lret) {
+				if (!err) {
 					btrfs_set_path_blocking(p);
 					btrfs_tree_lock(b);
 					btrfs_clear_path_blocking(p, b);
@@ -1837,16 +1825,14 @@ cow_done:
 			p->slots[level] = slot;
 			if (ins_len > 0 &&
 			    btrfs_leaf_free_space(root, b) < ins_len) {
-				int sret;
-
 				btrfs_set_path_blocking(p);
-				sret = split_leaf(trans, root, key,
-						  p, ins_len, ret == 0);
+				err = split_leaf(trans, root, key,
+						 p, ins_len, ret == 0);
 				btrfs_clear_path_blocking(p, NULL);
 
-				BUG_ON(sret > 0);
-				if (sret) {
-					ret = sret;
+				BUG_ON(err > 0);
+				if (err) {
+					ret = err;
 					goto done;
 				}
 			}
@@ -3807,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	}
 
 	/* delete the leaf if it is mostly empty */
-	if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) {
+	if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
 		/* push_leaf_left fixes the path.
 		 * make sure the path still points to our leaf
 		 * for possible call to del_ptr below
@@ -4042,10 +4028,9 @@ out:
 * calling this function.
 */
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
-			struct btrfs_key *key, int lowest_level,
+			struct btrfs_key *key, int level,
 			int cache_only, u64 min_trans)
 {
-	int level = lowest_level;
 	int slot;
 	struct extent_buffer *c;
 
@@ -4058,11 +4043,40 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
 	c = path->nodes[level];
 next:
 	if (slot >= btrfs_header_nritems(c)) {
-		level++;
-		if (level == BTRFS_MAX_LEVEL)
+		int ret;
+		int orig_lowest;
+		struct btrfs_key cur_key;
+		if (level + 1 >= BTRFS_MAX_LEVEL ||
+		    !path->nodes[level + 1])
 			return 1;
-		continue;
+
+		if (path->locks[level + 1]) {
+			level++;
+			continue;
+		}
+
+		slot = btrfs_header_nritems(c) - 1;
+		if (level == 0)
+			btrfs_item_key_to_cpu(c, &cur_key, slot);
+		else
+			btrfs_node_key_to_cpu(c, &cur_key, slot);
+
+		orig_lowest = path->lowest_level;
+		btrfs_release_path(root, path);
+		path->lowest_level = level;
+		ret = btrfs_search_slot(NULL, root, &cur_key, path,
+					0, 0);
+		path->lowest_level = orig_lowest;
+		if (ret < 0)
+			return ret;
+
+		c = path->nodes[level];
+		slot = path->slots[level];
+		if (ret == 0)
+			slot++;
+		goto next;
 	}
+
 	if (level == 0)
 		btrfs_item_key_to_cpu(c, key, slot);
 	else {
@@ -4146,7 +4160,8 @@ again:
	 * advance the path if there are now more items available.
	 */
 	if (nritems > 0 && path->slots[0] < nritems - 1) {
-		path->slots[0]++;
+		if (ret == 0)
+			path->slots[0]++;
 		ret = 0;
 		goto done;
 	}
@@ -4278,10 +4293,10 @@ int btrfs_previous_item(struct btrfs_root *root,
 		path->slots[0]--;
 
 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-		if (found_key.type == type)
-			return 0;
 		if (found_key.objectid < min_objectid)
 			break;
+		if (found_key.type == type)
+			return 0;
 		if (found_key.objectid == min_objectid &&
 		    found_key.type < type)
 			break;
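
For reference, the comparison that comp_keys() now delegates to btrfs_comp_cpu_keys() is exactly the ordering deleted in the first hunk above: objectid, then type, then offset. A sketch of the expected equivalent (the real helper lives elsewhere in the btrfs headers, so treat this body as an assumption):

	static int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
	{
		if (k1->objectid > k2->objectid)
			return 1;
		if (k1->objectid < k2->objectid)
			return -1;
		if (k1->type > k2->type)
			return 1;
		if (k1->type < k2->type)
			return -1;
		if (k1->offset > k2->offset)
			return 1;
		if (k1->offset < k2->offset)
			return -1;
		return 0;
	}
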
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2779c2f5360..837435ce84c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -481,7 +481,7 @@ struct btrfs_shared_data_ref {
 
 struct btrfs_extent_inline_ref {
 	u8 type;
-	u64 offset;
+	__le64 offset;
 } __attribute__ ((__packed__));
 
 /* old style backrefs item */
@@ -689,6 +689,7 @@ struct btrfs_space_info {
 	struct list_head block_groups;
 	spinlock_t lock;
 	struct rw_semaphore groups_sem;
+	atomic_t caching_threads;
 };
 
 /*
@@ -707,6 +708,9 @@ struct btrfs_free_cluster {
 	/* first extent starting offset */
 	u64 window_start;
 
+	/* if this cluster simply points at a bitmap in the block group */
+	bool points_to_bitmap;
+
 	struct btrfs_block_group_cache *block_group;
 	/*
 	 * when a cluster is allocated from a block group, we put the
@@ -716,24 +720,37 @@ struct btrfs_free_cluster {
 	struct list_head block_group_list;
 };
 
+enum btrfs_caching_type {
+	BTRFS_CACHE_NO		= 0,
+	BTRFS_CACHE_STARTED	= 1,
+	BTRFS_CACHE_FINISHED	= 2,
+};
+
 struct btrfs_block_group_cache {
 	struct btrfs_key key;
 	struct btrfs_block_group_item item;
+	struct btrfs_fs_info *fs_info;
 	spinlock_t lock;
-	struct mutex cache_mutex;
 	u64 pinned;
 	u64 reserved;
 	u64 flags;
-	int cached;
+	u64 sectorsize;
+	int extents_thresh;
+	int free_extents;
+	int total_bitmaps;
 	int ro;
 	int dirty;
 
+	/* cache tracking stuff */
+	wait_queue_head_t caching_q;
+	int cached;
+
 	struct btrfs_space_info *space_info;
 
 	/* free space cache stuff */
 	spinlock_t tree_lock;
-	struct rb_root free_space_bytes;
 	struct rb_root free_space_offset;
+	u64 free_space;
 
 	/* block group cache stuff */
 	struct rb_node cache_node;
@@ -808,6 +825,7 @@ struct btrfs_fs_info {
 	struct mutex drop_mutex;
 	struct mutex volume_mutex;
 	struct mutex tree_reloc_mutex;
+	struct rw_semaphore extent_commit_sem;
 
 	/*
 	 * this protects the ordered operations list only while we are
@@ -1988,6 +2006,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
 				 u64 bytes);
 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
 			      u64 bytes);
+void btrfs_free_pinned_extents(struct btrfs_fs_info *info);
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
 		     int level, int *slot);
@@ -2074,8 +2093,7 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
-			*root);
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref);
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root,
 		       struct extent_buffer *node,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d28d29c95f7..e83be2e4602 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1639,6 +1639,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->cleaner_mutex);
 	mutex_init(&fs_info->volume_mutex);
 	mutex_init(&fs_info->tree_reloc_mutex);
+	init_rwsem(&fs_info->extent_commit_sem);
 
 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -1799,6 +1800,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 					   btrfs_super_chunk_root(disk_super),
 					   blocksize, generation);
 	BUG_ON(!chunk_root->node);
+	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
+		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
+		       sb->s_id);
+		goto fail_chunk_root;
+	}
 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
 	chunk_root->commit_root = btrfs_root_node(chunk_root);
 
@@ -1826,6 +1832,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 					  blocksize, generation);
 	if (!tree_root->node)
 		goto fail_chunk_root;
+	if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
+		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
+		       sb->s_id);
+		goto fail_tree_root;
+	}
 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
 	tree_root->commit_root = btrfs_root_node(tree_root);
 
@@ -2322,6 +2333,9 @@ int close_ctree(struct btrfs_root *root)
 		printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
 	}
 
+	fs_info->closing = 2;
+	smp_mb();
+
 	if (fs_info->delalloc_bytes) {
 		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
 		       (unsigned long long)fs_info->delalloc_bytes);
@@ -2343,6 +2357,7 @@ int close_ctree(struct btrfs_root *root)
 	free_extent_buffer(root->fs_info->csum_root->commit_root);
 
 	btrfs_free_block_groups(root->fs_info);
+	btrfs_free_pinned_extents(root->fs_info);
 
 	del_fs_roots(fs_info);
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index edc7d208c5c..72a2b9c28e9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -21,6 +21,7 @@
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/sort.h> 22#include <linux/sort.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/kthread.h>
24#include "compat.h" 25#include "compat.h"
25#include "hash.h" 26#include "hash.h"
26#include "ctree.h" 27#include "ctree.h"
@@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
61 struct btrfs_root *extent_root, u64 alloc_bytes, 62 struct btrfs_root *extent_root, u64 alloc_bytes,
62 u64 flags, int force); 63 u64 flags, int force);
63 64
65static noinline int
66block_group_cache_done(struct btrfs_block_group_cache *cache)
67{
68 smp_mb();
69 return cache->cached == BTRFS_CACHE_FINISHED;
70}
71
64static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) 72static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
65{ 73{
66 return (cache->flags & bits) == bits; 74 return (cache->flags & bits) == bits;
@@ -146,20 +154,70 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
146} 154}
147 155
148/* 156/*
157 * We always set EXTENT_LOCKED for the super mirror extents so we don't
158 * overwrite them, so those bits need to be unset. Also, if we are unmounting
159 * with pinned extents still sitting there because we had a block group caching,
160 * we need to clear those now, since we are done.
161 */
162void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
163{
164 u64 start, end, last = 0;
165 int ret;
166
167 while (1) {
168 ret = find_first_extent_bit(&info->pinned_extents, last,
169 &start, &end,
170 EXTENT_LOCKED|EXTENT_DIRTY);
171 if (ret)
172 break;
173
174 clear_extent_bits(&info->pinned_extents, start, end,
175 EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
176 last = end+1;
177 }
178}
179
180static int remove_sb_from_cache(struct btrfs_root *root,
181 struct btrfs_block_group_cache *cache)
182{
183 struct btrfs_fs_info *fs_info = root->fs_info;
184 u64 bytenr;
185 u64 *logical;
186 int stripe_len;
187 int i, nr, ret;
188
189 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
190 bytenr = btrfs_sb_offset(i);
191 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
192 cache->key.objectid, bytenr,
193 0, &logical, &nr, &stripe_len);
194 BUG_ON(ret);
195 while (nr--) {
196 try_lock_extent(&fs_info->pinned_extents,
197 logical[nr],
198 logical[nr] + stripe_len - 1, GFP_NOFS);
199 }
200 kfree(logical);
201 }
202
203 return 0;
204}
205
206/*
149 * this is only called by cache_block_group, since we could have freed extents 207 * this is only called by cache_block_group, since we could have freed extents
150 * we need to check the pinned_extents for any extents that can't be used yet 208 * we need to check the pinned_extents for any extents that can't be used yet
151 * since their free space will be released as soon as the transaction commits. 209 * since their free space will be released as soon as the transaction commits.
152 */ 210 */
153static int add_new_free_space(struct btrfs_block_group_cache *block_group, 211static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
154 struct btrfs_fs_info *info, u64 start, u64 end) 212 struct btrfs_fs_info *info, u64 start, u64 end)
155{ 213{
156 u64 extent_start, extent_end, size; 214 u64 extent_start, extent_end, size, total_added = 0;
157 int ret; 215 int ret;
158 216
159 while (start < end) { 217 while (start < end) {
160 ret = find_first_extent_bit(&info->pinned_extents, start, 218 ret = find_first_extent_bit(&info->pinned_extents, start,
161 &extent_start, &extent_end, 219 &extent_start, &extent_end,
162 EXTENT_DIRTY); 220 EXTENT_DIRTY|EXTENT_LOCKED);
163 if (ret) 221 if (ret)
164 break; 222 break;
165 223
@@ -167,6 +225,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
167 start = extent_end + 1; 225 start = extent_end + 1;
168 } else if (extent_start > start && extent_start < end) { 226 } else if (extent_start > start && extent_start < end) {
169 size = extent_start - start; 227 size = extent_start - start;
228 total_added += size;
170 ret = btrfs_add_free_space(block_group, start, 229 ret = btrfs_add_free_space(block_group, start,
171 size); 230 size);
172 BUG_ON(ret); 231 BUG_ON(ret);
@@ -178,84 +237,93 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
178 237
179 if (start < end) { 238 if (start < end) {
180 size = end - start; 239 size = end - start;
240 total_added += size;
181 ret = btrfs_add_free_space(block_group, start, size); 241 ret = btrfs_add_free_space(block_group, start, size);
182 BUG_ON(ret); 242 BUG_ON(ret);
183 } 243 }
184 244
185 return 0; 245 return total_added;
186} 246}
187 247
188static int remove_sb_from_cache(struct btrfs_root *root, 248static int caching_kthread(void *data)
189 struct btrfs_block_group_cache *cache)
190{
191 u64 bytenr;
192 u64 *logical;
193 int stripe_len;
194 int i, nr, ret;
195
196 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
197 bytenr = btrfs_sb_offset(i);
198 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
199 cache->key.objectid, bytenr, 0,
200 &logical, &nr, &stripe_len);
201 BUG_ON(ret);
202 while (nr--) {
203 btrfs_remove_free_space(cache, logical[nr],
204 stripe_len);
205 }
206 kfree(logical);
207 }
208 return 0;
209}
210
211static int cache_block_group(struct btrfs_root *root,
212 struct btrfs_block_group_cache *block_group)
213{ 249{
250 struct btrfs_block_group_cache *block_group = data;
251 struct btrfs_fs_info *fs_info = block_group->fs_info;
252 u64 last = 0;
214 struct btrfs_path *path; 253 struct btrfs_path *path;
215 int ret = 0; 254 int ret = 0;
216 struct btrfs_key key; 255 struct btrfs_key key;
217 struct extent_buffer *leaf; 256 struct extent_buffer *leaf;
218 int slot; 257 int slot;
219 u64 last; 258 u64 total_found = 0;
220 259
221 if (!block_group) 260 BUG_ON(!fs_info);
222 return 0;
223
224 root = root->fs_info->extent_root;
225
226 if (block_group->cached)
227 return 0;
228 261
229 path = btrfs_alloc_path(); 262 path = btrfs_alloc_path();
230 if (!path) 263 if (!path)
231 return -ENOMEM; 264 return -ENOMEM;
232 265
233 path->reada = 2; 266 atomic_inc(&block_group->space_info->caching_threads);
267 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
234 /* 268 /*
235 * we get into deadlocks with paths held by callers of this function. 269 * We don't want to deadlock with somebody trying to allocate a new
236 * since the alloc_mutex is protecting things right now, just 270 * extent for the extent root while also trying to search the extent
237 * skip the locking here 271 * root to add free space. So we skip locking and search the commit
272 * root, since it's read-only
238 */ 273 */
239 path->skip_locking = 1; 274 path->skip_locking = 1;
240 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); 275 path->search_commit_root = 1;
276 path->reada = 2;
277
241 key.objectid = last; 278 key.objectid = last;
242 key.offset = 0; 279 key.offset = 0;
243 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); 280 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
244 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 281again:
282 /* need to make sure the commit_root doesn't disappear */
283 down_read(&fs_info->extent_commit_sem);
284
285 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
245 if (ret < 0) 286 if (ret < 0)
246 goto err; 287 goto err;
247 288
248 while (1) { 289 while (1) {
290 smp_mb();
291 if (block_group->fs_info->closing > 1) {
292 last = (u64)-1;
293 break;
294 }
295
249 leaf = path->nodes[0]; 296 leaf = path->nodes[0];
250 slot = path->slots[0]; 297 slot = path->slots[0];
251 if (slot >= btrfs_header_nritems(leaf)) { 298 if (slot >= btrfs_header_nritems(leaf)) {
252 ret = btrfs_next_leaf(root, path); 299 ret = btrfs_next_leaf(fs_info->extent_root, path);
253 if (ret < 0) 300 if (ret < 0)
254 goto err; 301 goto err;
255 if (ret == 0) 302 else if (ret)
256 continue;
257 else
258 break; 303 break;
304
305 if (need_resched() ||
306 btrfs_transaction_in_commit(fs_info)) {
307 leaf = path->nodes[0];
308
309 /* this shouldn't happen, but if the
310 * leaf is empty, just move on.
311 */
312 if (btrfs_header_nritems(leaf) == 0)
313 break;
314 /*
315 * we need to copy the key out so that
316 * we are sure the next search advances
317 * us forward in the btree.
318 */
319 btrfs_item_key_to_cpu(leaf, &key, 0);
320 btrfs_release_path(fs_info->extent_root, path);
321 up_read(&fs_info->extent_commit_sem);
322 schedule_timeout(1);
323 goto again;
324 }
325
326 continue;
259 } 327 }
260 btrfs_item_key_to_cpu(leaf, &key, slot); 328 btrfs_item_key_to_cpu(leaf, &key, slot);
261 if (key.objectid < block_group->key.objectid) 329 if (key.objectid < block_group->key.objectid)
@@ -266,24 +334,59 @@ static int cache_block_group(struct btrfs_root *root,
266 break; 334 break;
267 335
268 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { 336 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
269 add_new_free_space(block_group, root->fs_info, last, 337 total_found += add_new_free_space(block_group,
270 key.objectid); 338 fs_info, last,
271 339 key.objectid);
272 last = key.objectid + key.offset; 340 last = key.objectid + key.offset;
273 } 341 }
342
343 if (total_found > (1024 * 1024 * 2)) {
344 total_found = 0;
345 wake_up(&block_group->caching_q);
346 }
274next: 347next:
275 path->slots[0]++; 348 path->slots[0]++;
276 } 349 }
350 ret = 0;
277 351
278 add_new_free_space(block_group, root->fs_info, last, 352 total_found += add_new_free_space(block_group, fs_info, last,
279 block_group->key.objectid + 353 block_group->key.objectid +
280 block_group->key.offset); 354 block_group->key.offset);
355
356 spin_lock(&block_group->lock);
357 block_group->cached = BTRFS_CACHE_FINISHED;
358 spin_unlock(&block_group->lock);
281 359
282 block_group->cached = 1;
283 remove_sb_from_cache(root, block_group);
284 ret = 0;
285err: 360err:
286 btrfs_free_path(path); 361 btrfs_free_path(path);
362 up_read(&fs_info->extent_commit_sem);
363 atomic_dec(&block_group->space_info->caching_threads);
364 wake_up(&block_group->caching_q);
365
366 return 0;
367}
368
369static int cache_block_group(struct btrfs_block_group_cache *cache)
370{
371 struct task_struct *tsk;
372 int ret = 0;
373
374 spin_lock(&cache->lock);
375 if (cache->cached != BTRFS_CACHE_NO) {
376 spin_unlock(&cache->lock);
377 return ret;
378 }
379 cache->cached = BTRFS_CACHE_STARTED;
380 spin_unlock(&cache->lock);
381
382 tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
383 cache->key.objectid);
384 if (IS_ERR(tsk)) {
385 ret = PTR_ERR(tsk);
386 printk(KERN_ERR "error running thread %d\n", ret);
387 BUG();
388 }
389
287 return ret; 390 return ret;
288} 391}
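
cache_block_group() now only kicks off the kthread; the field it flips is tested everywhere through block_group_cache_done(), whose body lives outside this hunk. Judging by the BTRFS_CACHE_NO -> STARTED -> FINISHED transitions above, it presumably just tests for FINISHED. A small userspace model of that progression (the C11 atomics here are illustrative; the kernel uses its own barriers):

#include <stdatomic.h>
#include <stdio.h>

/* model of the caching states introduced by this patch */
enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

struct block_group { _Atomic enum cache_state cached; };

/* sketch of block_group_cache_done(): only a FINISHED group has a
 * complete free space cache and can satisfy allocations unassisted */
static int cache_done(struct block_group *bg)
{
        return atomic_load(&bg->cached) == CACHE_FINISHED;
}

int main(void)
{
        struct block_group bg = { CACHE_NO };

        printf("done? %d\n", cache_done(&bg));    /* 0: not started */
        atomic_store(&bg.cached, CACHE_STARTED);  /* kthread running */
        printf("done? %d\n", cache_done(&bg));    /* 0: in progress */
        atomic_store(&bg.cached, CACHE_FINISHED);
        printf("done? %d\n", cache_done(&bg));    /* 1 */
        return 0;
}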
289 392
@@ -990,15 +1093,13 @@ static inline int extent_ref_type(u64 parent, u64 owner)
990 return type; 1093 return type;
991} 1094}
992 1095
993static int find_next_key(struct btrfs_path *path, struct btrfs_key *key) 1096static int find_next_key(struct btrfs_path *path, int level,
1097 struct btrfs_key *key)
994 1098
995{ 1099{
996 int level; 1100 for (; level < BTRFS_MAX_LEVEL; level++) {
997 BUG_ON(!path->keep_locks);
998 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
999 if (!path->nodes[level]) 1101 if (!path->nodes[level])
1000 break; 1102 break;
1001 btrfs_assert_tree_locked(path->nodes[level]);
1002 if (path->slots[level] + 1 >= 1103 if (path->slots[level] + 1 >=
1003 btrfs_header_nritems(path->nodes[level])) 1104 btrfs_header_nritems(path->nodes[level]))
1004 continue; 1105 continue;
@@ -1158,7 +1259,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1158 * For simplicity, we just do not add new inline back 1259 * For simplicity, we just do not add new inline back
1159 * ref if there is any kind of item for this block 1260 * ref if there is any kind of item for this block
1160 */ 1261 */
1161 if (find_next_key(path, &key) == 0 && key.objectid == bytenr && 1262 if (find_next_key(path, 0, &key) == 0 &&
1263 key.objectid == bytenr &&
1162 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { 1264 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1163 err = -EAGAIN; 1265 err = -EAGAIN;
1164 goto out; 1266 goto out;
@@ -2388,13 +2490,29 @@ fail:
2388 2490
2389} 2491}
2390 2492
2493static struct btrfs_block_group_cache *
2494next_block_group(struct btrfs_root *root,
2495 struct btrfs_block_group_cache *cache)
2496{
2497 struct rb_node *node;
2498 spin_lock(&root->fs_info->block_group_cache_lock);
2499 node = rb_next(&cache->cache_node);
2500 btrfs_put_block_group(cache);
2501 if (node) {
2502 cache = rb_entry(node, struct btrfs_block_group_cache,
2503 cache_node);
2504 atomic_inc(&cache->count);
2505 } else
2506 cache = NULL;
2507 spin_unlock(&root->fs_info->block_group_cache_lock);
2508 return cache;
2509}
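
next_block_group() has take/drop reference semantics: it consumes the reference on the group it is handed and returns the next group with a fresh reference, so the scan loop in btrfs_write_dirty_block_groups() below only has to put a group it breaks out on. A toy userspace model of that contract (a linked list standing in for the rbtree):

#include <stdio.h>
#include <stdlib.h>

/* model of next_block_group(): puts the group it was handed and
 * returns the next one with a fresh reference, so iteration is just
 * "cache = next_bg(cache)" with no manual put in the loop body */
struct bg {
        int refs;
        struct bg *next;
};

static void put_bg(struct bg *bg)
{
        if (--bg->refs == 0)
                free(bg);
}

static struct bg *next_bg(struct bg *bg)
{
        struct bg *next = bg->next;

        put_bg(bg);
        if (next)
                next->refs++;   /* reference handed to the caller */
        return next;
}

int main(void)
{
        struct bg *c = calloc(1, sizeof(*c));
        struct bg *b = calloc(1, sizeof(*b));
        struct bg *a = calloc(1, sizeof(*a));

        a->next = b;
        b->next = c;
        a->refs = b->refs = c->refs = 1;   /* base refs held by the tree */

        a->refs++;                         /* our "lookup" reference */
        for (struct bg *i = a; i; i = next_bg(i))
                printf("visiting group, refs=%d\n", i->refs);

        put_bg(a);                         /* tear the model down */
        put_bg(b);
        put_bg(c);
        return 0;
}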
2510
2391int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 2511int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2392 struct btrfs_root *root) 2512 struct btrfs_root *root)
2393{ 2513{
2394 struct btrfs_block_group_cache *cache, *entry; 2514 struct btrfs_block_group_cache *cache;
2395 struct rb_node *n;
2396 int err = 0; 2515 int err = 0;
2397 int werr = 0;
2398 struct btrfs_path *path; 2516 struct btrfs_path *path;
2399 u64 last = 0; 2517 u64 last = 0;
2400 2518
@@ -2403,39 +2521,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2403 return -ENOMEM; 2521 return -ENOMEM;
2404 2522
2405 while (1) { 2523 while (1) {
2406 cache = NULL; 2524 if (last == 0) {
2407 spin_lock(&root->fs_info->block_group_cache_lock); 2525 err = btrfs_run_delayed_refs(trans, root,
2408 for (n = rb_first(&root->fs_info->block_group_cache_tree); 2526 (unsigned long)-1);
2409 n; n = rb_next(n)) { 2527 BUG_ON(err);
2410 entry = rb_entry(n, struct btrfs_block_group_cache,
2411 cache_node);
2412 if (entry->dirty) {
2413 cache = entry;
2414 break;
2415 }
2416 } 2528 }
2417 spin_unlock(&root->fs_info->block_group_cache_lock);
2418 2529
2419 if (!cache) 2530 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2420 break; 2531 while (cache) {
2532 if (cache->dirty)
2533 break;
2534 cache = next_block_group(root, cache);
2535 }
2536 if (!cache) {
2537 if (last == 0)
2538 break;
2539 last = 0;
2540 continue;
2541 }
2421 2542
2422 cache->dirty = 0; 2543 cache->dirty = 0;
2423 last += cache->key.offset; 2544 last = cache->key.objectid + cache->key.offset;
2424 2545
2425 err = write_one_cache_group(trans, root, 2546 err = write_one_cache_group(trans, root, path, cache);
2426 path, cache); 2547 BUG_ON(err);
2427 /* 2548 btrfs_put_block_group(cache);
2428 * if we fail to write the cache group, we want
2429 * to keep it marked dirty in hopes that a later
2430 * write will work
2431 */
2432 if (err) {
2433 werr = err;
2434 continue;
2435 }
2436 } 2549 }
2550
2437 btrfs_free_path(path); 2551 btrfs_free_path(path);
2438 return werr; 2552 return 0;
2439} 2553}
2440 2554
2441int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 2555int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -2485,6 +2599,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2485 found->force_alloc = 0; 2599 found->force_alloc = 0;
2486 *space_info = found; 2600 *space_info = found;
2487 list_add_rcu(&found->list, &info->space_info); 2601 list_add_rcu(&found->list, &info->space_info);
2602 atomic_set(&found->caching_threads, 0);
2488 return 0; 2603 return 0;
2489} 2604}
2490 2605
@@ -2697,7 +2812,7 @@ again:
2697 2812
2698 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" 2813 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2699 ", %llu bytes_used, %llu bytes_reserved, " 2814 ", %llu bytes_used, %llu bytes_reserved, "
2700 "%llu bytes_pinned, %llu bytes_readonly, %llu may use" 2815 "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
2701 "%llu total\n", (unsigned long long)bytes, 2816 "%llu total\n", (unsigned long long)bytes,
2702 (unsigned long long)data_sinfo->bytes_delalloc, 2817 (unsigned long long)data_sinfo->bytes_delalloc,
2703 (unsigned long long)data_sinfo->bytes_used, 2818 (unsigned long long)data_sinfo->bytes_used,
@@ -2948,13 +3063,9 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
2948 struct btrfs_block_group_cache *cache; 3063 struct btrfs_block_group_cache *cache;
2949 struct btrfs_fs_info *fs_info = root->fs_info; 3064 struct btrfs_fs_info *fs_info = root->fs_info;
2950 3065
2951 if (pin) { 3066 if (pin)
2952 set_extent_dirty(&fs_info->pinned_extents, 3067 set_extent_dirty(&fs_info->pinned_extents,
2953 bytenr, bytenr + num - 1, GFP_NOFS); 3068 bytenr, bytenr + num - 1, GFP_NOFS);
2954 } else {
2955 clear_extent_dirty(&fs_info->pinned_extents,
2956 bytenr, bytenr + num - 1, GFP_NOFS);
2957 }
2958 3069
2959 while (num > 0) { 3070 while (num > 0) {
2960 cache = btrfs_lookup_block_group(fs_info, bytenr); 3071 cache = btrfs_lookup_block_group(fs_info, bytenr);
@@ -2970,14 +3081,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
2970 spin_unlock(&cache->space_info->lock); 3081 spin_unlock(&cache->space_info->lock);
2971 fs_info->total_pinned += len; 3082 fs_info->total_pinned += len;
2972 } else { 3083 } else {
3084 int unpin = 0;
3085
3086 /*
3087 * in order to not race with the block group caching, we
3088 * only want to unpin the extent if we are cached. If
3089 * we aren't cached, we want to start async caching this
3090 * block group so we can free the extent the next time
3091 * around.
3092 */
2973 spin_lock(&cache->space_info->lock); 3093 spin_lock(&cache->space_info->lock);
2974 spin_lock(&cache->lock); 3094 spin_lock(&cache->lock);
2975 cache->pinned -= len; 3095 unpin = (cache->cached == BTRFS_CACHE_FINISHED);
2976 cache->space_info->bytes_pinned -= len; 3096 if (likely(unpin)) {
3097 cache->pinned -= len;
3098 cache->space_info->bytes_pinned -= len;
3099 fs_info->total_pinned -= len;
3100 }
2977 spin_unlock(&cache->lock); 3101 spin_unlock(&cache->lock);
2978 spin_unlock(&cache->space_info->lock); 3102 spin_unlock(&cache->space_info->lock);
2979 fs_info->total_pinned -= len; 3103
2980 if (cache->cached) 3104 if (likely(unpin))
3105 clear_extent_dirty(&fs_info->pinned_extents,
3106 bytenr, bytenr + len -1,
3107 GFP_NOFS);
3108 else
3109 cache_block_group(cache);
3110
3111 if (unpin)
2981 btrfs_add_free_space(cache, bytenr, len); 3112 btrfs_add_free_space(cache, bytenr, len);
2982 } 3113 }
2983 btrfs_put_block_group(cache); 3114 btrfs_put_block_group(cache);
@@ -3031,6 +3162,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
3031 &start, &end, EXTENT_DIRTY); 3162 &start, &end, EXTENT_DIRTY);
3032 if (ret) 3163 if (ret)
3033 break; 3164 break;
3165
3034 set_extent_dirty(copy, start, end, GFP_NOFS); 3166 set_extent_dirty(copy, start, end, GFP_NOFS);
3035 last = end + 1; 3167 last = end + 1;
3036 } 3168 }
@@ -3059,6 +3191,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3059 3191
3060 cond_resched(); 3192 cond_resched();
3061 } 3193 }
3194
3062 return ret; 3195 return ret;
3063} 3196}
3064 3197
@@ -3437,6 +3570,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val)
3437} 3570}
3438 3571
3439/* 3572/*
3573 * when we wait for progress in the block group caching, it's because
3574 * our allocation attempt failed at least once. So, we must sleep
3575 * and let some progress happen before we try again.
3576 *
3577 * This function will sleep at least once waiting for new free space to
3578 * show up, and then it will check the block group free space numbers
3579 * for our min num_bytes. Another option is to have it go ahead
3580 * and look in the rbtree for a free extent of a given size, but this
3581 * is a good start.
3582 */
3583static noinline int
3584wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3585 u64 num_bytes)
3586{
3587 DEFINE_WAIT(wait);
3588
3589 prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
3590
3591 if (block_group_cache_done(cache)) {
3592 finish_wait(&cache->caching_q, &wait);
3593 return 0;
3594 }
3595 schedule();
3596 finish_wait(&cache->caching_q, &wait);
3597
3598 wait_event(cache->caching_q, block_group_cache_done(cache) ||
3599 (cache->free_space >= num_bytes));
3600 return 0;
3601}
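
This sleep pairs with the wake_up() calls in caching_kthread(): the caching side publishes roughly 2MiB of progress per wakeup, and the allocator re-checks "done, or enough free space" each time. A hedged pthread model of the same handshake, with a condition variable standing in for the wait queue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t caching_q = PTHREAD_COND_INITIALIZER;
static unsigned long long free_space;
static int cache_done;

/* producer: adds free space in 2MiB steps, mirroring the wakeup
 * threshold in caching_kthread(), then marks the cache finished */
static void *caching_thread(void *arg)
{
        (void)arg;
        for (int i = 0; i < 4; i++) {
                pthread_mutex_lock(&lock);
                free_space += 2 * 1024 * 1024;
                pthread_cond_broadcast(&caching_q);
                pthread_mutex_unlock(&lock);
        }
        pthread_mutex_lock(&lock);
        cache_done = 1;
        pthread_cond_broadcast(&caching_q);
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* consumer: the analogue of wait_block_group_cache_progress() */
static void wait_cache_progress(unsigned long long num_bytes)
{
        pthread_mutex_lock(&lock);
        while (!cache_done && free_space < num_bytes)
                pthread_cond_wait(&caching_q, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, caching_thread, NULL);
        wait_cache_progress(5 * 1024 * 1024);
        pthread_join(t, NULL);
        printf("saw %llu bytes of cached free space\n", free_space);
        return 0;
}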
3602
3603enum btrfs_loop_type {
3604 LOOP_CACHED_ONLY = 0,
3605 LOOP_CACHING_NOWAIT = 1,
3606 LOOP_CACHING_WAIT = 2,
3607 LOOP_ALLOC_CHUNK = 3,
3608 LOOP_NO_EMPTY_SIZE = 4,
3609};
3610
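
The enum gives the allocator's retry ladder explicit names in place of the old bare 0..3 counter. A compact model of the escalation order (illustrative only; the real transitions live in find_free_extent() below):

#include <stdio.h>

/* the five stages, in the order the allocator escalates through them;
 * each failed pass relaxes one constraint */
enum loop_type {
        LOOP_CACHED_ONLY,     /* only fully cached block groups */
        LOOP_CACHING_NOWAIT,  /* partially cached too, but never wait */
        LOOP_CACHING_WAIT,    /* wait for caching progress */
        LOOP_ALLOC_CHUNK,     /* force-allocate a new chunk */
        LOOP_NO_EMPTY_SIZE,   /* drop empty_size/empty_cluster, last try */
};

int main(void)
{
        static const char * const name[] = {
                "LOOP_CACHED_ONLY", "LOOP_CACHING_NOWAIT",
                "LOOP_CACHING_WAIT", "LOOP_ALLOC_CHUNK",
                "LOOP_NO_EMPTY_SIZE",
        };

        for (int loop = LOOP_CACHED_ONLY; ; loop++) {
                printf("pass %d: %s\n", loop, name[loop]);
                if (loop == LOOP_NO_EMPTY_SIZE) {
                        printf("still nothing: -ENOSPC\n");
                        break;
                }
        }
        return 0;
}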
3611/*
3440 * walks the btree of allocated extents and finds a hole of a given size. 3612 * walks the btree of allocated extents and finds a hole of a given size.
3441 * The key ins is changed to record the hole: 3613 * The key ins is changed to record the hole:
3442 * ins->objectid == block start 3614 * ins->objectid == block start
@@ -3461,6 +3633,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3461 struct btrfs_space_info *space_info; 3633 struct btrfs_space_info *space_info;
3462 int last_ptr_loop = 0; 3634 int last_ptr_loop = 0;
3463 int loop = 0; 3635 int loop = 0;
3636 bool found_uncached_bg = false;
3464 3637
3465 WARN_ON(num_bytes < root->sectorsize); 3638 WARN_ON(num_bytes < root->sectorsize);
3466 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); 3639 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -3492,15 +3665,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3492 search_start = max(search_start, first_logical_byte(root, 0)); 3665 search_start = max(search_start, first_logical_byte(root, 0));
3493 search_start = max(search_start, hint_byte); 3666 search_start = max(search_start, hint_byte);
3494 3667
3495 if (!last_ptr) { 3668 if (!last_ptr)
3496 empty_cluster = 0; 3669 empty_cluster = 0;
3497 loop = 1;
3498 }
3499 3670
3500 if (search_start == hint_byte) { 3671 if (search_start == hint_byte) {
3501 block_group = btrfs_lookup_block_group(root->fs_info, 3672 block_group = btrfs_lookup_block_group(root->fs_info,
3502 search_start); 3673 search_start);
3503 if (block_group && block_group_bits(block_group, data)) { 3674 /*
3675 * we don't want to use the block group if it doesn't match our
3676 * allocation bits, or if it's not cached.
3677 */
3678 if (block_group && block_group_bits(block_group, data) &&
3679 block_group_cache_done(block_group)) {
3504 down_read(&space_info->groups_sem); 3680 down_read(&space_info->groups_sem);
3505 if (list_empty(&block_group->list) || 3681 if (list_empty(&block_group->list) ||
3506 block_group->ro) { 3682 block_group->ro) {
@@ -3523,21 +3699,35 @@ search:
3523 down_read(&space_info->groups_sem); 3699 down_read(&space_info->groups_sem);
3524 list_for_each_entry(block_group, &space_info->block_groups, list) { 3700 list_for_each_entry(block_group, &space_info->block_groups, list) {
3525 u64 offset; 3701 u64 offset;
3702 int cached;
3526 3703
3527 atomic_inc(&block_group->count); 3704 atomic_inc(&block_group->count);
3528 search_start = block_group->key.objectid; 3705 search_start = block_group->key.objectid;
3529 3706
3530have_block_group: 3707have_block_group:
3531 if (unlikely(!block_group->cached)) { 3708 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
3532 mutex_lock(&block_group->cache_mutex); 3709 /*
3533 ret = cache_block_group(root, block_group); 3710 * we want to start caching kthreads, but not too many
3534 mutex_unlock(&block_group->cache_mutex); 3711 * right off the bat so we don't overwhelm the system,
3535 if (ret) { 3712 * so only start them if there are fewer than 2 and we're
3536 btrfs_put_block_group(block_group); 3713 * in the initial allocation phase.
3537 break; 3714 */
3715 if (loop > LOOP_CACHING_NOWAIT ||
3716 atomic_read(&space_info->caching_threads) < 2) {
3717 ret = cache_block_group(block_group);
3718 BUG_ON(ret);
3538 } 3719 }
3539 } 3720 }
3540 3721
3722 cached = block_group_cache_done(block_group);
3723 if (unlikely(!cached)) {
3724 found_uncached_bg = true;
3725
3726 /* if we only want cached bgs, loop */
3727 if (loop == LOOP_CACHED_ONLY)
3728 goto loop;
3729 }
3730
3541 if (unlikely(block_group->ro)) 3731 if (unlikely(block_group->ro))
3542 goto loop; 3732 goto loop;
3543 3733
@@ -3616,14 +3806,21 @@ refill_cluster:
3616 spin_unlock(&last_ptr->refill_lock); 3806 spin_unlock(&last_ptr->refill_lock);
3617 goto checks; 3807 goto checks;
3618 } 3808 }
3809 } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
3810 spin_unlock(&last_ptr->refill_lock);
3811
3812 wait_block_group_cache_progress(block_group,
3813 num_bytes + empty_cluster + empty_size);
3814 goto have_block_group;
3619 } 3815 }
3816
3620 /* 3817 /*
3621 * at this point we either didn't find a cluster 3818 * at this point we either didn't find a cluster
3622 * or we weren't able to allocate a block from our 3819 * or we weren't able to allocate a block from our
3623 * cluster. Free the cluster we've been trying 3820 * cluster. Free the cluster we've been trying
3624 * to use, and go to the next block group 3821 * to use, and go to the next block group
3625 */ 3822 */
3626 if (loop < 2) { 3823 if (loop < LOOP_NO_EMPTY_SIZE) {
3627 btrfs_return_cluster_to_free_space(NULL, 3824 btrfs_return_cluster_to_free_space(NULL,
3628 last_ptr); 3825 last_ptr);
3629 spin_unlock(&last_ptr->refill_lock); 3826 spin_unlock(&last_ptr->refill_lock);
@@ -3634,11 +3831,17 @@ refill_cluster:
3634 3831
3635 offset = btrfs_find_space_for_alloc(block_group, search_start, 3832 offset = btrfs_find_space_for_alloc(block_group, search_start,
3636 num_bytes, empty_size); 3833 num_bytes, empty_size);
3637 if (!offset) 3834 if (!offset && (cached || (!cached &&
3835 loop == LOOP_CACHING_NOWAIT))) {
3638 goto loop; 3836 goto loop;
3837 } else if (!offset && (!cached &&
3838 loop > LOOP_CACHING_NOWAIT)) {
3839 wait_block_group_cache_progress(block_group,
3840 num_bytes + empty_size);
3841 goto have_block_group;
3842 }
3639checks: 3843checks:
3640 search_start = stripe_align(root, offset); 3844 search_start = stripe_align(root, offset);
3641
3642 /* move on to the next group */ 3845 /* move on to the next group */
3643 if (search_start + num_bytes >= search_end) { 3846 if (search_start + num_bytes >= search_end) {
3644 btrfs_add_free_space(block_group, offset, num_bytes); 3847 btrfs_add_free_space(block_group, offset, num_bytes);
@@ -3684,13 +3887,26 @@ loop:
3684 } 3887 }
3685 up_read(&space_info->groups_sem); 3888 up_read(&space_info->groups_sem);
3686 3889
3687 /* loop == 0, try to find a clustered alloc in every block group 3890 /* LOOP_CACHED_ONLY, only search fully cached block groups
3688 * loop == 1, try again after forcing a chunk allocation 3891 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
3689 * loop == 2, set empty_size and empty_cluster to 0 and try again 3892 * don't wait for them to finish caching
3893 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
3894 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
3895 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
3896 * again
3690 */ 3897 */
3691 if (!ins->objectid && loop < 3 && 3898 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
3692 (empty_size || empty_cluster || allowed_chunk_alloc)) { 3899 (found_uncached_bg || empty_size || empty_cluster ||
3693 if (loop >= 2) { 3900 allowed_chunk_alloc)) {
3901 if (found_uncached_bg) {
3902 found_uncached_bg = false;
3903 if (loop < LOOP_CACHING_WAIT) {
3904 loop++;
3905 goto search;
3906 }
3907 }
3908
3909 if (loop == LOOP_ALLOC_CHUNK) {
3694 empty_size = 0; 3910 empty_size = 0;
3695 empty_cluster = 0; 3911 empty_cluster = 0;
3696 } 3912 }
@@ -3703,7 +3919,7 @@ loop:
3703 space_info->force_alloc = 1; 3919 space_info->force_alloc = 1;
3704 } 3920 }
3705 3921
3706 if (loop < 3) { 3922 if (loop < LOOP_NO_EMPTY_SIZE) {
3707 loop++; 3923 loop++;
3708 goto search; 3924 goto search;
3709 } 3925 }
@@ -3799,7 +4015,7 @@ again:
3799 num_bytes, data, 1); 4015 num_bytes, data, 1);
3800 goto again; 4016 goto again;
3801 } 4017 }
3802 if (ret) { 4018 if (ret == -ENOSPC) {
3803 struct btrfs_space_info *sinfo; 4019 struct btrfs_space_info *sinfo;
3804 4020
3805 sinfo = __find_space_info(root->fs_info, data); 4021 sinfo = __find_space_info(root->fs_info, data);
@@ -3807,7 +4023,6 @@ again:
3807 "wanted %llu\n", (unsigned long long)data, 4023 "wanted %llu\n", (unsigned long long)data,
3808 (unsigned long long)num_bytes); 4024 (unsigned long long)num_bytes);
3809 dump_space_info(sinfo, num_bytes); 4025 dump_space_info(sinfo, num_bytes);
3810 BUG();
3811 } 4026 }
3812 4027
3813 return ret; 4028 return ret;
@@ -3845,7 +4060,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3845 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, 4060 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
3846 empty_size, hint_byte, search_end, ins, 4061 empty_size, hint_byte, search_end, ins,
3847 data); 4062 data);
3848 update_reserved_extents(root, ins->objectid, ins->offset, 1); 4063 if (!ret)
4064 update_reserved_extents(root, ins->objectid, ins->offset, 1);
4065
3849 return ret; 4066 return ret;
3850} 4067}
3851 4068
@@ -4007,9 +4224,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4007 struct btrfs_block_group_cache *block_group; 4224 struct btrfs_block_group_cache *block_group;
4008 4225
4009 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); 4226 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4010 mutex_lock(&block_group->cache_mutex); 4227 cache_block_group(block_group);
4011 cache_block_group(root, block_group); 4228 wait_event(block_group->caching_q,
4012 mutex_unlock(&block_group->cache_mutex); 4229 block_group_cache_done(block_group));
4013 4230
4014 ret = btrfs_remove_free_space(block_group, ins->objectid, 4231 ret = btrfs_remove_free_space(block_group, ins->objectid,
4015 ins->offset); 4232 ins->offset);
@@ -4040,7 +4257,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
4040 ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, 4257 ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4041 empty_size, hint_byte, search_end, 4258 empty_size, hint_byte, search_end,
4042 ins, 0); 4259 ins, 0);
4043 BUG_ON(ret); 4260 if (ret)
4261 return ret;
4044 4262
4045 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 4263 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4046 if (parent == 0) 4264 if (parent == 0)
@@ -4128,6 +4346,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4128 return buf; 4346 return buf;
4129} 4347}
4130 4348
4349#if 0
4131int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, 4350int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
4132 struct btrfs_root *root, struct extent_buffer *leaf) 4351 struct btrfs_root *root, struct extent_buffer *leaf)
4133{ 4352{
@@ -4171,8 +4390,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
4171 return 0; 4390 return 0;
4172} 4391}
4173 4392
4174#if 0
4175
4176static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans, 4393static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
4177 struct btrfs_root *root, 4394 struct btrfs_root *root,
4178 struct btrfs_leaf_ref *ref) 4395 struct btrfs_leaf_ref *ref)
@@ -4553,262 +4770,471 @@ out:
4553} 4770}
4554#endif 4771#endif
4555 4772
4773struct walk_control {
4774 u64 refs[BTRFS_MAX_LEVEL];
4775 u64 flags[BTRFS_MAX_LEVEL];
4776 struct btrfs_key update_progress;
4777 int stage;
4778 int level;
4779 int shared_level;
4780 int update_ref;
4781 int keep_locks;
4782};
4783
4784#define DROP_REFERENCE 1
4785#define UPDATE_BACKREF 2
4786
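
The walk_control structure drives a two-stage traversal: DROP_REFERENCE walks down dropping references and stops at shared blocks, while UPDATE_BACKREF re-walks a shared subtree to convert its back references before the drop resumes. A greatly simplified userspace model of that stage switch (the real eligibility checks on owner and flags are omitted):

#include <stdio.h>

enum stage { DROP_REFERENCE, UPDATE_BACKREF };

struct node {
        int refs;
        struct node *left, *right;
};

/* model: DROP_REFERENCE stops at a shared node (refs > 1) and only
 * drops one reference; with update_ref set it first re-walks that
 * subtree in UPDATE_BACKREF before dropping */
static void walk(struct node *n, enum stage stage, int update_ref, int depth)
{
        if (!n)
                return;
        if (stage == DROP_REFERENCE && n->refs > 1) {
                if (update_ref)
                        walk(n, UPDATE_BACKREF, 0, depth);
                printf("%*sdrop one ref (%d -> %d), stop descending\n",
                       depth * 2, "", n->refs, n->refs - 1);
                n->refs--;
                return;
        }
        walk(n->left, stage, update_ref, depth + 1);
        walk(n->right, stage, update_ref, depth + 1);
        if (stage == UPDATE_BACKREF)
                printf("%*supdate backrefs\n", depth * 2, "");
        else
                printf("%*sfree block\n", depth * 2, "");
}

int main(void)
{
        struct node leaf = { 2, NULL, NULL };  /* shared with a snapshot */
        struct node shared = { 2, &leaf, NULL };
        struct node top = { 1, &shared, NULL };

        walk(&top, DROP_REFERENCE, 1, 0);
        return 0;
}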
4556/* 4787/*
4557 * helper function for drop_subtree, this function is similar to 4788 * helper to process a tree block while walking down the tree.
4558 * walk_down_tree. The main difference is that it checks reference 4789 *
4559 * counts while tree blocks are locked. 4790 * when wc->stage == DROP_REFERENCE, this function checks
4791 * reference count of the block. if the block is shared and
4792 * we need update back refs for the subtree rooted at the
4793 * block, this function changes wc->stage to UPDATE_BACKREF
4794 *
4795 * when wc->stage == UPDATE_BACKREF, this function updates
4796 * back refs for pointers in the block.
4797 *
4798 * NOTE: return value 1 means we should stop walking down.
4560 */ 4799 */
4561static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 4800static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4562 struct btrfs_root *root, 4801 struct btrfs_root *root,
4563 struct btrfs_path *path, int *level) 4802 struct btrfs_path *path,
4803 struct walk_control *wc)
4564{ 4804{
4565 struct extent_buffer *next; 4805 int level = wc->level;
4566 struct extent_buffer *cur; 4806 struct extent_buffer *eb = path->nodes[level];
4567 struct extent_buffer *parent; 4807 struct btrfs_key key;
4568 u64 bytenr; 4808 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4569 u64 ptr_gen;
4570 u64 refs;
4571 u64 flags;
4572 u32 blocksize;
4573 int ret; 4809 int ret;
4574 4810
4575 cur = path->nodes[*level]; 4811 if (wc->stage == UPDATE_BACKREF &&
4576 ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len, 4812 btrfs_header_owner(eb) != root->root_key.objectid)
4577 &refs, &flags); 4813 return 1;
4578 BUG_ON(ret);
4579 if (refs > 1)
4580 goto out;
4581 4814
4582 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 4815 /*
4816 * when reference count of tree block is 1, it won't increase
4817 * again. once full backref flag is set, we never clear it.
4818 */
4819 if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4820 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
4821 BUG_ON(!path->locks[level]);
4822 ret = btrfs_lookup_extent_info(trans, root,
4823 eb->start, eb->len,
4824 &wc->refs[level],
4825 &wc->flags[level]);
4826 BUG_ON(ret);
4827 BUG_ON(wc->refs[level] == 0);
4828 }
4583 4829
4584 while (*level >= 0) { 4830 if (wc->stage == DROP_REFERENCE &&
4585 cur = path->nodes[*level]; 4831 wc->update_ref && wc->refs[level] > 1) {
4586 if (*level == 0) { 4832 BUG_ON(eb == root->node);
4587 ret = btrfs_drop_leaf_ref(trans, root, cur); 4833 BUG_ON(path->slots[level] > 0);
4588 BUG_ON(ret); 4834 if (level == 0)
4589 clean_tree_block(trans, root, cur); 4835 btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
4590 break; 4836 else
4591 } 4837 btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
4592 if (path->slots[*level] >= btrfs_header_nritems(cur)) { 4838 if (btrfs_header_owner(eb) == root->root_key.objectid &&
4593 clean_tree_block(trans, root, cur); 4839 btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
4594 break; 4840 wc->stage = UPDATE_BACKREF;
4841 wc->shared_level = level;
4595 } 4842 }
4843 }
4596 4844
4597 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 4845 if (wc->stage == DROP_REFERENCE) {
4598 blocksize = btrfs_level_size(root, *level - 1); 4846 if (wc->refs[level] > 1)
4599 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 4847 return 1;
4600 4848
4601 next = read_tree_block(root, bytenr, blocksize, ptr_gen); 4849 if (path->locks[level] && !wc->keep_locks) {
4602 btrfs_tree_lock(next); 4850 btrfs_tree_unlock(eb);
4603 btrfs_set_lock_blocking(next); 4851 path->locks[level] = 0;
4852 }
4853 return 0;
4854 }
4604 4855
4605 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 4856 /* wc->stage == UPDATE_BACKREF */
4606 &refs, &flags); 4857 if (!(wc->flags[level] & flag)) {
4858 BUG_ON(!path->locks[level]);
4859 ret = btrfs_inc_ref(trans, root, eb, 1);
4607 BUG_ON(ret); 4860 BUG_ON(ret);
4608 if (refs > 1) { 4861 ret = btrfs_dec_ref(trans, root, eb, 0);
4609 parent = path->nodes[*level]; 4862 BUG_ON(ret);
4610 ret = btrfs_free_extent(trans, root, bytenr, 4863 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
4611 blocksize, parent->start, 4864 eb->len, flag, 0);
4612 btrfs_header_owner(parent), 4865 BUG_ON(ret);
4613 *level - 1, 0); 4866 wc->flags[level] |= flag;
4867 }
4868
4869 /*
4870 * the block is shared by multiple trees, so it's not good to
4871 * keep the tree lock
4872 */
4873 if (path->locks[level] && level > 0) {
4874 btrfs_tree_unlock(eb);
4875 path->locks[level] = 0;
4876 }
4877 return 0;
4878}
4879
4880/*
4881 * helper to process a tree block while walking up the tree.
4882 *
4883 * when wc->stage == DROP_REFERENCE, this function drops
4884 * reference count on the block.
4885 *
4886 * when wc->stage == UPDATE_BACKREF, this function changes
4887 * wc->stage back to DROP_REFERENCE if we changed wc->stage
4888 * to UPDATE_BACKREF previously while processing the block.
4889 *
4890 * NOTE: return value 1 means we should stop walking up.
4891 */
4892static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
4893 struct btrfs_root *root,
4894 struct btrfs_path *path,
4895 struct walk_control *wc)
4896{
4897 int ret = 0;
4898 int level = wc->level;
4899 struct extent_buffer *eb = path->nodes[level];
4900 u64 parent = 0;
4901
4902 if (wc->stage == UPDATE_BACKREF) {
4903 BUG_ON(wc->shared_level < level);
4904 if (level < wc->shared_level)
4905 goto out;
4906
4907 BUG_ON(wc->refs[level] <= 1);
4908 ret = find_next_key(path, level + 1, &wc->update_progress);
4909 if (ret > 0)
4910 wc->update_ref = 0;
4911
4912 wc->stage = DROP_REFERENCE;
4913 wc->shared_level = -1;
4914 path->slots[level] = 0;
4915
4916 /*
4917 * check reference count again if the block isn't locked.
4918 * we should start walking down the tree again if reference
4919 * count is one.
4920 */
4921 if (!path->locks[level]) {
4922 BUG_ON(level == 0);
4923 btrfs_tree_lock(eb);
4924 btrfs_set_lock_blocking(eb);
4925 path->locks[level] = 1;
4926
4927 ret = btrfs_lookup_extent_info(trans, root,
4928 eb->start, eb->len,
4929 &wc->refs[level],
4930 &wc->flags[level]);
4614 BUG_ON(ret); 4931 BUG_ON(ret);
4615 path->slots[*level]++; 4932 BUG_ON(wc->refs[level] == 0);
4616 btrfs_tree_unlock(next); 4933 if (wc->refs[level] == 1) {
4617 free_extent_buffer(next); 4934 btrfs_tree_unlock(eb);
4618 continue; 4935 path->locks[level] = 0;
4936 return 1;
4937 }
4938 } else {
4939 BUG_ON(level != 0);
4619 } 4940 }
4941 }
4620 4942
4621 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 4943 /* wc->stage == DROP_REFERENCE */
4944 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
4622 4945
4623 *level = btrfs_header_level(next); 4946 if (wc->refs[level] == 1) {
4624 path->nodes[*level] = next; 4947 if (level == 0) {
4625 path->slots[*level] = 0; 4948 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4626 path->locks[*level] = 1; 4949 ret = btrfs_dec_ref(trans, root, eb, 1);
4627 cond_resched(); 4950 else
4951 ret = btrfs_dec_ref(trans, root, eb, 0);
4952 BUG_ON(ret);
4953 }
4954 /* make block locked assertion in clean_tree_block happy */
4955 if (!path->locks[level] &&
4956 btrfs_header_generation(eb) == trans->transid) {
4957 btrfs_tree_lock(eb);
4958 btrfs_set_lock_blocking(eb);
4959 path->locks[level] = 1;
4960 }
4961 clean_tree_block(trans, root, eb);
4628 } 4962 }
4629out:
4630 if (path->nodes[*level] == root->node)
4631 parent = path->nodes[*level];
4632 else
4633 parent = path->nodes[*level + 1];
4634 bytenr = path->nodes[*level]->start;
4635 blocksize = path->nodes[*level]->len;
4636 4963
4637 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start, 4964 if (eb == root->node) {
4638 btrfs_header_owner(parent), *level, 0); 4965 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4966 parent = eb->start;
4967 else
4968 BUG_ON(root->root_key.objectid !=
4969 btrfs_header_owner(eb));
4970 } else {
4971 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4972 parent = path->nodes[level + 1]->start;
4973 else
4974 BUG_ON(root->root_key.objectid !=
4975 btrfs_header_owner(path->nodes[level + 1]));
4976 }
4977
4978 ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
4979 root->root_key.objectid, level, 0);
4639 BUG_ON(ret); 4980 BUG_ON(ret);
4981out:
4982 wc->refs[level] = 0;
4983 wc->flags[level] = 0;
4984 return ret;
4985}
4986
4987static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4988 struct btrfs_root *root,
4989 struct btrfs_path *path,
4990 struct walk_control *wc)
4991{
4992 struct extent_buffer *next;
4993 struct extent_buffer *cur;
4994 u64 bytenr;
4995 u64 ptr_gen;
4996 u32 blocksize;
4997 int level = wc->level;
4998 int ret;
4999
5000 while (level >= 0) {
5001 cur = path->nodes[level];
5002 BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
5003
5004 ret = walk_down_proc(trans, root, path, wc);
5005 if (ret > 0)
5006 break;
5007
5008 if (level == 0)
5009 break;
5010
5011 bytenr = btrfs_node_blockptr(cur, path->slots[level]);
5012 blocksize = btrfs_level_size(root, level - 1);
5013 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
5014
5015 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
5016 btrfs_tree_lock(next);
5017 btrfs_set_lock_blocking(next);
4640 5018
4641 if (path->locks[*level]) { 5019 level--;
4642 btrfs_tree_unlock(path->nodes[*level]); 5020 BUG_ON(level != btrfs_header_level(next));
4643 path->locks[*level] = 0; 5021 path->nodes[level] = next;
5022 path->slots[level] = 0;
5023 path->locks[level] = 1;
5024 wc->level = level;
4644 } 5025 }
4645 free_extent_buffer(path->nodes[*level]);
4646 path->nodes[*level] = NULL;
4647 *level += 1;
4648 cond_resched();
4649 return 0; 5026 return 0;
4650} 5027}
4651 5028
4652/*
4653 * helper for dropping snapshots. This walks back up the tree in the path
4654 * to find the first node higher up where we haven't yet gone through
4655 * all the slots
4656 */
4657static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 5029static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
4658 struct btrfs_root *root, 5030 struct btrfs_root *root,
4659 struct btrfs_path *path, 5031 struct btrfs_path *path,
4660 int *level, int max_level) 5032 struct walk_control *wc, int max_level)
4661{ 5033{
4662 struct btrfs_root_item *root_item = &root->root_item; 5034 int level = wc->level;
4663 int i;
4664 int slot;
4665 int ret; 5035 int ret;
4666 5036
4667 for (i = *level; i < max_level && path->nodes[i]; i++) { 5037 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
4668 slot = path->slots[i]; 5038 while (level < max_level && path->nodes[level]) {
4669 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 5039 wc->level = level;
4670 /* 5040 if (path->slots[level] + 1 <
4671 * there is more work to do in this level. 5041 btrfs_header_nritems(path->nodes[level])) {
4672 * Update the drop_progress marker to reflect 5042 path->slots[level]++;
4673 * the work we've done so far, and then bump
4674 * the slot number
4675 */
4676 path->slots[i]++;
4677 WARN_ON(*level == 0);
4678 if (max_level == BTRFS_MAX_LEVEL) {
4679 btrfs_node_key(path->nodes[i],
4680 &root_item->drop_progress,
4681 path->slots[i]);
4682 root_item->drop_level = i;
4683 }
4684 *level = i;
4685 return 0; 5043 return 0;
4686 } else { 5044 } else {
4687 struct extent_buffer *parent; 5045 ret = walk_up_proc(trans, root, path, wc);
4688 5046 if (ret > 0)
4689 /* 5047 return 0;
4690 * this whole node is done, free our reference
4691 * on it and go up one level
4692 */
4693 if (path->nodes[*level] == root->node)
4694 parent = path->nodes[*level];
4695 else
4696 parent = path->nodes[*level + 1];
4697 5048
4698 clean_tree_block(trans, root, path->nodes[i]); 5049 if (path->locks[level]) {
4699 ret = btrfs_free_extent(trans, root, 5050 btrfs_tree_unlock(path->nodes[level]);
4700 path->nodes[i]->start, 5051 path->locks[level] = 0;
4701 path->nodes[i]->len,
4702 parent->start,
4703 btrfs_header_owner(parent),
4704 *level, 0);
4705 BUG_ON(ret);
4706 if (path->locks[*level]) {
4707 btrfs_tree_unlock(path->nodes[i]);
4708 path->locks[i] = 0;
4709 } 5052 }
4710 free_extent_buffer(path->nodes[i]); 5053 free_extent_buffer(path->nodes[level]);
4711 path->nodes[i] = NULL; 5054 path->nodes[level] = NULL;
4712 *level = i + 1; 5055 level++;
4713 } 5056 }
4714 } 5057 }
4715 return 1; 5058 return 1;
4716} 5059}
4717 5060
4718/* 5061/*
4719 * drop the reference count on the tree rooted at 'snap'. This traverses 5062 * drop a subvolume tree.
4720 * the tree freeing any blocks that have a ref count of zero after being 5063 *
4721 * decremented. 5064 * this function traverses the tree freeing any blocks that are only
5065 * referenced by the tree.
5066 *
5067 * when a shared tree block is found, this function decreases its
5068 * reference count by one. if update_ref is true, this function
5069 * also makes sure backrefs for the shared block and all lower level
5070 * blocks are properly updated.
4722 */ 5071 */
4723int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root 5072int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
4724 *root)
4725{ 5073{
4726 int ret = 0;
4727 int wret;
4728 int level;
4729 struct btrfs_path *path; 5074 struct btrfs_path *path;
4730 int update_count; 5075 struct btrfs_trans_handle *trans;
5076 struct btrfs_root *tree_root = root->fs_info->tree_root;
4731 struct btrfs_root_item *root_item = &root->root_item; 5077 struct btrfs_root_item *root_item = &root->root_item;
5078 struct walk_control *wc;
5079 struct btrfs_key key;
5080 int err = 0;
5081 int ret;
5082 int level;
4732 5083
4733 path = btrfs_alloc_path(); 5084 path = btrfs_alloc_path();
4734 BUG_ON(!path); 5085 BUG_ON(!path);
4735 5086
4736 level = btrfs_header_level(root->node); 5087 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5088 BUG_ON(!wc);
5089
5090 trans = btrfs_start_transaction(tree_root, 1);
5091
4737 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 5092 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5093 level = btrfs_header_level(root->node);
4738 path->nodes[level] = btrfs_lock_root_node(root); 5094 path->nodes[level] = btrfs_lock_root_node(root);
4739 btrfs_set_lock_blocking(path->nodes[level]); 5095 btrfs_set_lock_blocking(path->nodes[level]);
4740 path->slots[level] = 0; 5096 path->slots[level] = 0;
4741 path->locks[level] = 1; 5097 path->locks[level] = 1;
5098 memset(&wc->update_progress, 0,
5099 sizeof(wc->update_progress));
4742 } else { 5100 } else {
4743 struct btrfs_key key;
4744 struct btrfs_disk_key found_key;
4745 struct extent_buffer *node;
4746
4747 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 5101 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5102 memcpy(&wc->update_progress, &key,
5103 sizeof(wc->update_progress));
5104
4748 level = root_item->drop_level; 5105 level = root_item->drop_level;
5106 BUG_ON(level == 0);
4749 path->lowest_level = level; 5107 path->lowest_level = level;
4750 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5108 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4751 if (wret < 0) { 5109 path->lowest_level = 0;
4752 ret = wret; 5110 if (ret < 0) {
5111 err = ret;
4753 goto out; 5112 goto out;
4754 } 5113 }
4755 node = path->nodes[level]; 5114 btrfs_node_key_to_cpu(path->nodes[level], &key,
4756 btrfs_node_key(node, &found_key, path->slots[level]); 5115 path->slots[level]);
4757 WARN_ON(memcmp(&found_key, &root_item->drop_progress, 5116 WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
4758 sizeof(found_key))); 5117
4759 /* 5118 /*
4760 * unlock our path, this is safe because only this 5119 * unlock our path, this is safe because only this
4761 * function is allowed to delete this snapshot 5120 * function is allowed to delete this snapshot
4762 */ 5121 */
4763 btrfs_unlock_up_safe(path, 0); 5122 btrfs_unlock_up_safe(path, 0);
5123
5124 level = btrfs_header_level(root->node);
5125 while (1) {
5126 btrfs_tree_lock(path->nodes[level]);
5127 btrfs_set_lock_blocking(path->nodes[level]);
5128
5129 ret = btrfs_lookup_extent_info(trans, root,
5130 path->nodes[level]->start,
5131 path->nodes[level]->len,
5132 &wc->refs[level],
5133 &wc->flags[level]);
5134 BUG_ON(ret);
5135 BUG_ON(wc->refs[level] == 0);
5136
5137 if (level == root_item->drop_level)
5138 break;
5139
5140 btrfs_tree_unlock(path->nodes[level]);
5141 WARN_ON(wc->refs[level] != 1);
5142 level--;
5143 }
4764 } 5144 }
5145
5146 wc->level = level;
5147 wc->shared_level = -1;
5148 wc->stage = DROP_REFERENCE;
5149 wc->update_ref = update_ref;
5150 wc->keep_locks = 0;
5151
4765 while (1) { 5152 while (1) {
4766 unsigned long update; 5153 ret = walk_down_tree(trans, root, path, wc);
4767 wret = walk_down_tree(trans, root, path, &level); 5154 if (ret < 0) {
4768 if (wret > 0) 5155 err = ret;
4769 break; 5156 break;
4770 if (wret < 0) 5157 }
4771 ret = wret;
4772 5158
4773 wret = walk_up_tree(trans, root, path, &level, 5159 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
4774 BTRFS_MAX_LEVEL); 5160 if (ret < 0) {
4775 if (wret > 0) 5161 err = ret;
4776 break; 5162 break;
4777 if (wret < 0) 5163 }
4778 ret = wret; 5164
4779 if (trans->transaction->in_commit || 5165 if (ret > 0) {
4780 trans->transaction->delayed_refs.flushing) { 5166 BUG_ON(wc->stage != DROP_REFERENCE);
4781 ret = -EAGAIN;
4782 break; 5167 break;
4783 } 5168 }
4784 for (update_count = 0; update_count < 16; update_count++) { 5169
5170 if (wc->stage == DROP_REFERENCE) {
5171 level = wc->level;
5172 btrfs_node_key(path->nodes[level],
5173 &root_item->drop_progress,
5174 path->slots[level]);
5175 root_item->drop_level = level;
5176 }
5177
5178 BUG_ON(wc->level == 0);
5179 if (trans->transaction->in_commit ||
5180 trans->transaction->delayed_refs.flushing) {
5181 ret = btrfs_update_root(trans, tree_root,
5182 &root->root_key,
5183 root_item);
5184 BUG_ON(ret);
5185
5186 btrfs_end_transaction(trans, tree_root);
5187 trans = btrfs_start_transaction(tree_root, 1);
5188 } else {
5189 unsigned long update;
4785 update = trans->delayed_ref_updates; 5190 update = trans->delayed_ref_updates;
4786 trans->delayed_ref_updates = 0; 5191 trans->delayed_ref_updates = 0;
4787 if (update) 5192 if (update)
4788 btrfs_run_delayed_refs(trans, root, update); 5193 btrfs_run_delayed_refs(trans, tree_root,
4789 else 5194 update);
4790 break;
4791 } 5195 }
4792 } 5196 }
5197 btrfs_release_path(root, path);
5198 BUG_ON(err);
5199
5200 ret = btrfs_del_root(trans, tree_root, &root->root_key);
5201 BUG_ON(ret);
5202
5203 free_extent_buffer(root->node);
5204 free_extent_buffer(root->commit_root);
5205 kfree(root);
4793out: 5206out:
5207 btrfs_end_transaction(trans, tree_root);
5208 kfree(wc);
4794 btrfs_free_path(path); 5209 btrfs_free_path(path);
4795 return ret; 5210 return err;
4796} 5211}
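
Because btrfs_drop_snapshot() now starts and ends its own transactions, the drop_progress/drop_level pair in the root item is what makes a multi-transaction deletion restartable. A toy model of that checkpointing contract (the key space is flattened to a counter; names are illustrative):

#include <stdio.h>

struct root_item { unsigned long long drop_progress; };

/* model: a long deletion is chopped into transactions; each pause
 * records the next key to process so a restart resumes from the last
 * checkpoint instead of rescanning from zero */
static void drop_tree(struct root_item *ri, unsigned long long nr_keys)
{
        unsigned long long key = ri->drop_progress;

        while (key < nr_keys) {
                key++;                           /* "free one block" */
                if (key % 1000 == 0) {
                        ri->drop_progress = key; /* commit a checkpoint */
                        printf("checkpoint at %llu\n", key);
                }
        }
        printf("done after key %llu\n", key);
}

int main(void)
{
        struct root_item ri = { 0 };

        drop_tree(&ri, 3500);
        /* a restart redoes only the work since the last checkpoint */
        drop_tree(&ri, 3500);
        return 0;
}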
4797 5212
5213/*
5214 * drop subtree rooted at tree block 'node'.
5215 *
5216 * NOTE: this function will unlock and release tree block 'node'
5217 */
4798int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 5218int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
4799 struct btrfs_root *root, 5219 struct btrfs_root *root,
4800 struct extent_buffer *node, 5220 struct extent_buffer *node,
4801 struct extent_buffer *parent) 5221 struct extent_buffer *parent)
4802{ 5222{
4803 struct btrfs_path *path; 5223 struct btrfs_path *path;
5224 struct walk_control *wc;
4804 int level; 5225 int level;
4805 int parent_level; 5226 int parent_level;
4806 int ret = 0; 5227 int ret = 0;
4807 int wret; 5228 int wret;
4808 5229
5230 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5231
4809 path = btrfs_alloc_path(); 5232 path = btrfs_alloc_path();
4810 BUG_ON(!path); 5233 BUG_ON(!path);
4811 5234
5235 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5236 BUG_ON(!wc);
5237
4812 btrfs_assert_tree_locked(parent); 5238 btrfs_assert_tree_locked(parent);
4813 parent_level = btrfs_header_level(parent); 5239 parent_level = btrfs_header_level(parent);
4814 extent_buffer_get(parent); 5240 extent_buffer_get(parent);
@@ -4817,24 +5243,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
4817 5243
4818 btrfs_assert_tree_locked(node); 5244 btrfs_assert_tree_locked(node);
4819 level = btrfs_header_level(node); 5245 level = btrfs_header_level(node);
4820 extent_buffer_get(node);
4821 path->nodes[level] = node; 5246 path->nodes[level] = node;
4822 path->slots[level] = 0; 5247 path->slots[level] = 0;
5248 path->locks[level] = 1;
5249
5250 wc->refs[parent_level] = 1;
5251 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5252 wc->level = level;
5253 wc->shared_level = -1;
5254 wc->stage = DROP_REFERENCE;
5255 wc->update_ref = 0;
5256 wc->keep_locks = 1;
4823 5257
4824 while (1) { 5258 while (1) {
4825 wret = walk_down_tree(trans, root, path, &level); 5259 wret = walk_down_tree(trans, root, path, wc);
4826 if (wret < 0) 5260 if (wret < 0) {
4827 ret = wret; 5261 ret = wret;
4828 if (wret != 0)
4829 break; 5262 break;
5263 }
4830 5264
4831 wret = walk_up_tree(trans, root, path, &level, parent_level); 5265 wret = walk_up_tree(trans, root, path, wc, parent_level);
4832 if (wret < 0) 5266 if (wret < 0)
4833 ret = wret; 5267 ret = wret;
4834 if (wret != 0) 5268 if (wret != 0)
4835 break; 5269 break;
4836 } 5270 }
4837 5271
5272 kfree(wc);
4838 btrfs_free_path(path); 5273 btrfs_free_path(path);
4839 return ret; 5274 return ret;
4840} 5275}
@@ -6739,11 +7174,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
6739 &info->block_group_cache_tree); 7174 &info->block_group_cache_tree);
6740 spin_unlock(&info->block_group_cache_lock); 7175 spin_unlock(&info->block_group_cache_lock);
6741 7176
6742 btrfs_remove_free_space_cache(block_group);
6743 down_write(&block_group->space_info->groups_sem); 7177 down_write(&block_group->space_info->groups_sem);
6744 list_del(&block_group->list); 7178 list_del(&block_group->list);
6745 up_write(&block_group->space_info->groups_sem); 7179 up_write(&block_group->space_info->groups_sem);
6746 7180
7181 if (block_group->cached == BTRFS_CACHE_STARTED)
7182 wait_event(block_group->caching_q,
7183 block_group_cache_done(block_group));
7184
7185 btrfs_remove_free_space_cache(block_group);
7186
6747 WARN_ON(atomic_read(&block_group->count) != 1); 7187 WARN_ON(atomic_read(&block_group->count) != 1);
6748 kfree(block_group); 7188 kfree(block_group);
6749 7189
@@ -6809,9 +7249,19 @@ int btrfs_read_block_groups(struct btrfs_root *root)
6809 atomic_set(&cache->count, 1); 7249 atomic_set(&cache->count, 1);
6810 spin_lock_init(&cache->lock); 7250 spin_lock_init(&cache->lock);
6811 spin_lock_init(&cache->tree_lock); 7251 spin_lock_init(&cache->tree_lock);
6812 mutex_init(&cache->cache_mutex); 7252 cache->fs_info = info;
7253 init_waitqueue_head(&cache->caching_q);
6813 INIT_LIST_HEAD(&cache->list); 7254 INIT_LIST_HEAD(&cache->list);
6814 INIT_LIST_HEAD(&cache->cluster_list); 7255 INIT_LIST_HEAD(&cache->cluster_list);
7256
7257 /*
7258 * we only want to have 32k of ram per block group for keeping
7259 * track of free space, and if we pass 1/2 of that we want to
7260 * start converting things over to using bitmaps
7261 */
7262 cache->extents_thresh = ((1024 * 32) / 2) /
7263 sizeof(struct btrfs_free_space);
7264
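
Worked numbers for the threshold above, assuming a 64-bit build where struct btrfs_free_space is about 48 bytes (rb_node plus offset, bytes and the new bitmap pointer): half of the 32KiB budget then holds roughly 341 extent entries before the cache starts converting to bitmaps.

#include <stdio.h>

int main(void)
{
        /* the 48-byte entry size is an assumption, not from this hunk */
        unsigned long entry_size = 48;

        printf("extents_thresh = %lu entries\n",
               ((1024UL * 32) / 2) / entry_size);   /* 341 */
        return 0;
}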
6815 read_extent_buffer(leaf, &cache->item, 7265 read_extent_buffer(leaf, &cache->item,
6816 btrfs_item_ptr_offset(leaf, path->slots[0]), 7266 btrfs_item_ptr_offset(leaf, path->slots[0]),
6817 sizeof(cache->item)); 7267 sizeof(cache->item));
@@ -6820,6 +7270,26 @@ int btrfs_read_block_groups(struct btrfs_root *root)
6820 key.objectid = found_key.objectid + found_key.offset; 7270 key.objectid = found_key.objectid + found_key.offset;
6821 btrfs_release_path(root, path); 7271 btrfs_release_path(root, path);
6822 cache->flags = btrfs_block_group_flags(&cache->item); 7272 cache->flags = btrfs_block_group_flags(&cache->item);
7273 cache->sectorsize = root->sectorsize;
7274
7275 remove_sb_from_cache(root, cache);
7276
7277 /*
7278 * check for two cases, either we are full, and therefore
7279 * don't need to bother with the caching work since we won't
7280 * find any space, or we are empty, and we can just add all
7281 * the space in and be done with it. This saves us a lot of
7282 * time, particularly in the full case.
7283 */
7284 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7285 cache->cached = BTRFS_CACHE_FINISHED;
7286 } else if (btrfs_block_group_used(&cache->item) == 0) {
7287 cache->cached = BTRFS_CACHE_FINISHED;
7288 add_new_free_space(cache, root->fs_info,
7289 found_key.objectid,
7290 found_key.objectid +
7291 found_key.offset);
7292 }
6823 7293
6824 ret = update_space_info(info, cache->flags, found_key.offset, 7294 ret = update_space_info(info, cache->flags, found_key.offset,
6825 btrfs_block_group_used(&cache->item), 7295 btrfs_block_group_used(&cache->item),
@@ -6863,10 +7333,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
6863 cache->key.objectid = chunk_offset; 7333 cache->key.objectid = chunk_offset;
6864 cache->key.offset = size; 7334 cache->key.offset = size;
6865 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 7335 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7336 cache->sectorsize = root->sectorsize;
7337
7338 /*
7339 * we only want to have 32k of ram per block group for keeping track
7340 * of free space, and if we pass 1/2 of that we want to start
7341 * converting things over to using bitmaps
7342 */
7343 cache->extents_thresh = ((1024 * 32) / 2) /
7344 sizeof(struct btrfs_free_space);
6866 atomic_set(&cache->count, 1); 7345 atomic_set(&cache->count, 1);
6867 spin_lock_init(&cache->lock); 7346 spin_lock_init(&cache->lock);
6868 spin_lock_init(&cache->tree_lock); 7347 spin_lock_init(&cache->tree_lock);
6869 mutex_init(&cache->cache_mutex); 7348 init_waitqueue_head(&cache->caching_q);
6870 INIT_LIST_HEAD(&cache->list); 7349 INIT_LIST_HEAD(&cache->list);
6871 INIT_LIST_HEAD(&cache->cluster_list); 7350 INIT_LIST_HEAD(&cache->cluster_list);
6872 7351
@@ -6875,6 +7354,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
6875 cache->flags = type; 7354 cache->flags = type;
6876 btrfs_set_block_group_flags(&cache->item, type); 7355 btrfs_set_block_group_flags(&cache->item, type);
6877 7356
7357 cache->cached = BTRFS_CACHE_FINISHED;
7358 remove_sb_from_cache(root, cache);
7359
7360 add_new_free_space(cache, root->fs_info, chunk_offset,
7361 chunk_offset + size);
7362
6878 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, 7363 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
6879 &cache->space_info); 7364 &cache->space_info);
6880 BUG_ON(ret); 7365 BUG_ON(ret);
@@ -6933,7 +7418,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
6933 rb_erase(&block_group->cache_node, 7418 rb_erase(&block_group->cache_node,
6934 &root->fs_info->block_group_cache_tree); 7419 &root->fs_info->block_group_cache_tree);
6935 spin_unlock(&root->fs_info->block_group_cache_lock); 7420 spin_unlock(&root->fs_info->block_group_cache_lock);
6936 btrfs_remove_free_space_cache(block_group); 7421
6937 down_write(&block_group->space_info->groups_sem); 7422 down_write(&block_group->space_info->groups_sem);
6938 /* 7423 /*
6939 * we must use list_del_init so people can check to see if they 7424 * we must use list_del_init so people can check to see if they
@@ -6942,11 +7427,18 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
6942 list_del_init(&block_group->list); 7427 list_del_init(&block_group->list);
6943 up_write(&block_group->space_info->groups_sem); 7428 up_write(&block_group->space_info->groups_sem);
6944 7429
7430 if (block_group->cached == BTRFS_CACHE_STARTED)
7431 wait_event(block_group->caching_q,
7432 block_group_cache_done(block_group));
7433
7434 btrfs_remove_free_space_cache(block_group);
7435
6945 spin_lock(&block_group->space_info->lock); 7436 spin_lock(&block_group->space_info->lock);
6946 block_group->space_info->total_bytes -= block_group->key.offset; 7437 block_group->space_info->total_bytes -= block_group->key.offset;
6947 block_group->space_info->bytes_readonly -= block_group->key.offset; 7438 block_group->space_info->bytes_readonly -= block_group->key.offset;
6948 spin_unlock(&block_group->space_info->lock); 7439 spin_unlock(&block_group->space_info->lock);
6949 block_group->space_info->full = 0; 7440
7441 btrfs_clear_space_info_full(root->fs_info);
6950 7442
6951 btrfs_put_block_group(block_group); 7443 btrfs_put_block_group(block_group);
6952 btrfs_put_block_group(block_group); 7444 btrfs_put_block_group(block_group);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 126477eaecf..4b833972273 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -22,7 +22,6 @@
22#include <linux/time.h> 22#include <linux/time.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/smp_lock.h>
26#include <linux/backing-dev.h> 25#include <linux/backing-dev.h>
27#include <linux/mpage.h> 26#include <linux/mpage.h>
28#include <linux/swap.h> 27#include <linux/swap.h>
@@ -151,7 +150,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
151 } 150 }
152 if (end_pos > isize) { 151 if (end_pos > isize) {
153 i_size_write(inode, end_pos); 152 i_size_write(inode, end_pos);
154 btrfs_update_inode(trans, root, inode); 153 /* we've only changed i_size in ram, and we haven't updated
154 * the disk i_size. There is no need to log the inode
155 * at this time.
156 */
155 } 157 }
156 err = btrfs_end_transaction(trans, root); 158 err = btrfs_end_transaction(trans, root);
157out_unlock: 159out_unlock:
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4538e48581a..5edcee3a617 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -16,45 +16,46 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/pagemap.h>
19#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/math64.h>
20#include "ctree.h" 22#include "ctree.h"
21#include "free-space-cache.h" 23#include "free-space-cache.h"
22#include "transaction.h" 24#include "transaction.h"
23 25
24struct btrfs_free_space { 26#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
25 struct rb_node bytes_index; 27#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
26 struct rb_node offset_index;
27 u64 offset;
28 u64 bytes;
29};
30 28
31static int tree_insert_offset(struct rb_root *root, u64 offset, 29static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
32 struct rb_node *node) 30 u64 offset)
33{ 31{
34 struct rb_node **p = &root->rb_node; 32 BUG_ON(offset < bitmap_start);
35 struct rb_node *parent = NULL; 33 offset -= bitmap_start;
36 struct btrfs_free_space *info; 34 return (unsigned long)(div64_u64(offset, sectorsize));
35}
37 36
38 while (*p) { 37static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
39 parent = *p; 38{
40 info = rb_entry(parent, struct btrfs_free_space, offset_index); 39 return (unsigned long)(div64_u64(bytes, sectorsize));
40}
41 41
42 if (offset < info->offset) 42static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
43 p = &(*p)->rb_left; 43 u64 offset)
44 else if (offset > info->offset) 44{
45 p = &(*p)->rb_right; 45 u64 bitmap_start;
46 else 46 u64 bytes_per_bitmap;
47 return -EEXIST;
48 }
49 47
50 rb_link_node(node, parent, p); 48 bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
51 rb_insert_color(node, root); 49 bitmap_start = offset - block_group->key.objectid;
50 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
51 bitmap_start *= bytes_per_bitmap;
52 bitmap_start += block_group->key.objectid;
52 53
53 return 0; 54 return bitmap_start;
54} 55}
55 56
56static int tree_insert_bytes(struct rb_root *root, u64 bytes, 57static int tree_insert_offset(struct rb_root *root, u64 offset,
57 struct rb_node *node) 58 struct rb_node *node, int bitmap)
58{ 59{
59 struct rb_node **p = &root->rb_node; 60 struct rb_node **p = &root->rb_node;
60 struct rb_node *parent = NULL; 61 struct rb_node *parent = NULL;
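To put numbers on the helpers introduced at the top of this hunk: with a 4KiB PAGE_CACHE_SIZE and a 4KiB sectorsize (both assumptions, though they are the common x86 values), BITS_PER_BITMAP is 32768, so one bitmap page covers 32768 * 4KiB = 128MiB, and offset_to_bitmap() is just a round-down to 128MiB granularity measured from the block group start. A standalone version of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL              /* assumed */
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)

/* round 'offset' down to the start of the bitmap that covers it,
 * mirroring offset_to_bitmap() above */
static uint64_t toy_offset_to_bitmap(uint64_t bg_start, uint64_t sectorsize,
				     uint64_t offset)
{
	uint64_t bytes_per_bitmap = BITS_PER_BITMAP * sectorsize;
	uint64_t rel = (offset - bg_start) / bytes_per_bitmap;

	return bg_start + rel * bytes_per_bitmap;
}

int main(void)
{
	uint64_t sectorsize = 4096;               /* assumed */
	uint64_t bg_start = 1024ULL << 20;        /* block group at 1GiB */
	uint64_t off = bg_start + (200ULL << 20); /* 200MiB into the group */

	printf("bytes per bitmap: %llu MiB\n",
	       (unsigned long long)((BITS_PER_BITMAP * sectorsize) >> 20));
	printf("bitmap start for that offset: %llu MiB\n",
	       (unsigned long long)
	       (toy_offset_to_bitmap(bg_start, sectorsize, off) >> 20));
	return 0;
}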
@@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
62 63
63 while (*p) { 64 while (*p) {
64 parent = *p; 65 parent = *p;
65 info = rb_entry(parent, struct btrfs_free_space, bytes_index); 66 info = rb_entry(parent, struct btrfs_free_space, offset_index);
66 67
67 if (bytes < info->bytes) 68 if (offset < info->offset) {
68 p = &(*p)->rb_left; 69 p = &(*p)->rb_left;
69 else 70 } else if (offset > info->offset) {
70 p = &(*p)->rb_right; 71 p = &(*p)->rb_right;
72 } else {
73 /*
74 * we could have a bitmap entry and an extent entry
75 * share the same offset. If this is the case, we want
76 * the extent entry to always be found first if we do a
77 * linear search through the tree, since we want to have
78 * the quickest allocation time, and allocating from an
79 * extent is faster than allocating from a bitmap. So
80 * if we're inserting a bitmap and we find an entry at
81 * this offset, we want to go right, or after this entry
82 * logically. If we are inserting an extent and we've
83 * found a bitmap, we want to go left, or before
84 * logically.
85 */
86 if (bitmap) {
87 WARN_ON(info->bitmap);
88 p = &(*p)->rb_right;
89 } else {
90 WARN_ON(!info->bitmap);
91 p = &(*p)->rb_left;
92 }
93 }
71 } 94 }
72 95
73 rb_link_node(node, parent, p); 96 rb_link_node(node, parent, p);
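The tie-break in the insertion above can be restated as a plain comparison: equal offsets are ordered so that the extent entry sorts strictly before the bitmap entry, which is what lets an in-order walk hand out the cheaper extent allocations first. A minimal sketch of that ordering (toy types, not the kernel's rb-tree plumbing):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_entry {
	uint64_t offset;
	bool is_bitmap;
};

/* <0: a before b, >0: a after b; at equal offsets the extent entry
 * (is_bitmap == false) wins, matching tree_insert_offset() above */
static int toy_cmp(const struct toy_entry *a, const struct toy_entry *b)
{
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return (int)a->is_bitmap - (int)b->is_bitmap;
}

int main(void)
{
	struct toy_entry ext = { 4096, false };
	struct toy_entry bmp = { 4096, true };

	assert(toy_cmp(&ext, &bmp) < 0); /* extent first at equal offset */
	assert(toy_cmp(&bmp, &ext) > 0);
	return 0;
}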
@@ -79,110 +102,143 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
79/* 102/*
80 * searches the tree for the given offset. 103 * searches the tree for the given offset.
81 * 104 *
82 * fuzzy == 1: this is used for allocations where we are given a hint of where 105 * fuzzy - If this is set, then we are trying to make an allocation, and we just
83 * to look for free space. Because the hint may not be completely on an offset 106 * want a section that has at least bytes size and comes at or after the given
84 * mark, or the hint may no longer point to free space we need to fudge our 107 * offset.
85 * results a bit. So we look for free space starting at or after offset with at
86 * least bytes size. We prefer to find as close to the given offset as we can.
87 * Also if the offset is within a free space range, then we will return the free
88 * space that contains the given offset, which means we can return a free space
89 * chunk with an offset before the provided offset.
90 *
91 * fuzzy == 0: this is just a normal tree search. Give us the free space that
92 * starts at the given offset which is at least bytes size, and if its not there
93 * return NULL.
94 */ 108 */
95static struct btrfs_free_space *tree_search_offset(struct rb_root *root, 109static struct btrfs_free_space *
96 u64 offset, u64 bytes, 110tree_search_offset(struct btrfs_block_group_cache *block_group,
97 int fuzzy) 111 u64 offset, int bitmap_only, int fuzzy)
98{ 112{
99 struct rb_node *n = root->rb_node; 113 struct rb_node *n = block_group->free_space_offset.rb_node;
100 struct btrfs_free_space *entry, *ret = NULL; 114 struct btrfs_free_space *entry, *prev = NULL;
115
116 /* find entry that is closest to the 'offset' */
117 while (1) {
118 if (!n) {
119 entry = NULL;
120 break;
121 }
101 122
102 while (n) {
103 entry = rb_entry(n, struct btrfs_free_space, offset_index); 123 entry = rb_entry(n, struct btrfs_free_space, offset_index);
124 prev = entry;
104 125
105 if (offset < entry->offset) { 126 if (offset < entry->offset)
106 if (fuzzy &&
107 (!ret || entry->offset < ret->offset) &&
108 (bytes <= entry->bytes))
109 ret = entry;
110 n = n->rb_left; 127 n = n->rb_left;
111 } else if (offset > entry->offset) { 128 else if (offset > entry->offset)
112 if (fuzzy &&
113 (entry->offset + entry->bytes - 1) >= offset &&
114 bytes <= entry->bytes) {
115 ret = entry;
116 break;
117 }
118 n = n->rb_right; 129 n = n->rb_right;
119 } else { 130 else
120 if (bytes > entry->bytes) {
121 n = n->rb_right;
122 continue;
123 }
124 ret = entry;
125 break; 131 break;
126 }
127 } 132 }
128 133
129 return ret; 134 if (bitmap_only) {
130} 135 if (!entry)
136 return NULL;
137 if (entry->bitmap)
138 return entry;
131 139
132/* 140 /*
133 * return a chunk at least bytes size, as close to offset that we can get. 141 * bitmap entry and extent entry may share same offset,
134 */ 142 * in that case, bitmap entry comes after extent entry.
135static struct btrfs_free_space *tree_search_bytes(struct rb_root *root, 143 */
136 u64 offset, u64 bytes) 144 n = rb_next(n);
137{ 145 if (!n)
138 struct rb_node *n = root->rb_node; 146 return NULL;
139 struct btrfs_free_space *entry, *ret = NULL; 147 entry = rb_entry(n, struct btrfs_free_space, offset_index);
140 148 if (entry->offset != offset)
141 while (n) { 149 return NULL;
142 entry = rb_entry(n, struct btrfs_free_space, bytes_index);
143 150
144 if (bytes < entry->bytes) { 151 WARN_ON(!entry->bitmap);
152 return entry;
153 } else if (entry) {
154 if (entry->bitmap) {
145 /* 155 /*
146 * We prefer to get a hole size as close to the size we 156 * if previous extent entry covers the offset,
147 * are asking for so we don't take small slivers out of 157 * we should return it instead of the bitmap entry
148 * huge holes, but we also want to get as close to the
149 * offset as possible so we don't have a whole lot of
150 * fragmentation.
151 */ 158 */
152 if (offset <= entry->offset) { 159 n = &entry->offset_index;
153 if (!ret) 160 while (1) {
154 ret = entry; 161 n = rb_prev(n);
155 else if (entry->bytes < ret->bytes) 162 if (!n)
156 ret = entry; 163 break;
157 else if (entry->offset < ret->offset) 164 prev = rb_entry(n, struct btrfs_free_space,
158 ret = entry; 165 offset_index);
166 if (!prev->bitmap) {
167 if (prev->offset + prev->bytes > offset)
168 entry = prev;
169 break;
170 }
159 } 171 }
160 n = n->rb_left; 172 }
161 } else if (bytes > entry->bytes) { 173 return entry;
162 n = n->rb_right; 174 }
175
176 if (!prev)
177 return NULL;
178
179 /* find last entry before the 'offset' */
180 entry = prev;
181 if (entry->offset > offset) {
182 n = rb_prev(&entry->offset_index);
183 if (n) {
184 entry = rb_entry(n, struct btrfs_free_space,
185 offset_index);
186 BUG_ON(entry->offset > offset);
163 } else { 187 } else {
164 /* 188 if (fuzzy)
165 * Ok we may have multiple chunks of the wanted size, 189 return entry;
166 * so we don't want to take the first one we find, we 190 else
167 * want to take the one closest to our given offset, so 191 return NULL;
168 * keep searching just in case there's a better match.
169 */
170 n = n->rb_right;
171 if (offset > entry->offset)
172 continue;
173 else if (!ret || entry->offset < ret->offset)
174 ret = entry;
175 } 192 }
176 } 193 }
177 194
178 return ret; 195 if (entry->bitmap) {
196 n = &entry->offset_index;
197 while (1) {
198 n = rb_prev(n);
199 if (!n)
200 break;
201 prev = rb_entry(n, struct btrfs_free_space,
202 offset_index);
203 if (!prev->bitmap) {
204 if (prev->offset + prev->bytes > offset)
205 return prev;
206 break;
207 }
208 }
209 if (entry->offset + BITS_PER_BITMAP *
210 block_group->sectorsize > offset)
211 return entry;
212 } else if (entry->offset + entry->bytes > offset)
213 return entry;
214
215 if (!fuzzy)
216 return NULL;
217
218 while (1) {
219 if (entry->bitmap) {
220 if (entry->offset + BITS_PER_BITMAP *
221 block_group->sectorsize > offset)
222 break;
223 } else {
224 if (entry->offset + entry->bytes > offset)
225 break;
226 }
227
228 n = rb_next(&entry->offset_index);
229 if (!n)
230 return NULL;
231 entry = rb_entry(n, struct btrfs_free_space, offset_index);
232 }
233 return entry;
179} 234}
180 235
181static void unlink_free_space(struct btrfs_block_group_cache *block_group, 236static void unlink_free_space(struct btrfs_block_group_cache *block_group,
182 struct btrfs_free_space *info) 237 struct btrfs_free_space *info)
183{ 238{
184 rb_erase(&info->offset_index, &block_group->free_space_offset); 239 rb_erase(&info->offset_index, &block_group->free_space_offset);
185 rb_erase(&info->bytes_index, &block_group->free_space_bytes); 240 block_group->free_extents--;
241 block_group->free_space -= info->bytes;
186} 242}
187 243
188static int link_free_space(struct btrfs_block_group_cache *block_group, 244static int link_free_space(struct btrfs_block_group_cache *block_group,
@@ -190,17 +246,353 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
190{ 246{
191 int ret = 0; 247 int ret = 0;
192 248
193 249 BUG_ON(!info->bitmap && !info->bytes);
194 BUG_ON(!info->bytes);
195 ret = tree_insert_offset(&block_group->free_space_offset, info->offset, 250 ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
196 &info->offset_index); 251 &info->offset_index, (info->bitmap != NULL));
197 if (ret) 252 if (ret)
198 return ret; 253 return ret;
199 254
200 ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes, 255 block_group->free_space += info->bytes;
201 &info->bytes_index); 256 block_group->free_extents++;
202 if (ret) 257 return ret;
203 return ret; 258}
259
260static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
261{
262 u64 max_bytes, possible_bytes;
263
264 /*
265 * The goal is to keep the total amount of memory used per 1gb of space
266 * at or below 32k, so we need to adjust how much memory we allow to be
267 * used by extent based free space tracking
268 */
269 max_bytes = MAX_CACHE_BYTES_PER_GIG *
270 (div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
271
272 possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) +
273 (sizeof(struct btrfs_free_space) *
274 block_group->extents_thresh);
275
276 if (possible_bytes > max_bytes) {
277 int extent_bytes = max_bytes -
278 (block_group->total_bitmaps * PAGE_CACHE_SIZE);
279
280 if (extent_bytes <= 0) {
281 block_group->extents_thresh = 0;
282 return;
283 }
284
285 block_group->extents_thresh = extent_bytes /
286 (sizeof(struct btrfs_free_space));
287 }
288}
289
290static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
291 struct btrfs_free_space *info, u64 offset,
292 u64 bytes)
293{
294 unsigned long start, end;
295 unsigned long i;
296
297 start = offset_to_bit(info->offset, block_group->sectorsize, offset);
298 end = start + bytes_to_bits(bytes, block_group->sectorsize);
299 BUG_ON(end > BITS_PER_BITMAP);
300
301 for (i = start; i < end; i++)
302 clear_bit(i, info->bitmap);
303
304 info->bytes -= bytes;
305 block_group->free_space -= bytes;
306}
307
308static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
309 struct btrfs_free_space *info, u64 offset,
310 u64 bytes)
311{
312 unsigned long start, end;
313 unsigned long i;
314
315 start = offset_to_bit(info->offset, block_group->sectorsize, offset);
316 end = start + bytes_to_bits(bytes, block_group->sectorsize);
317 BUG_ON(end > BITS_PER_BITMAP);
318
319 for (i = start; i < end; i++)
320 set_bit(i, info->bitmap);
321
322 info->bytes += bytes;
323 block_group->free_space += bytes;
324}
325
326static int search_bitmap(struct btrfs_block_group_cache *block_group,
327 struct btrfs_free_space *bitmap_info, u64 *offset,
328 u64 *bytes)
329{
330 unsigned long found_bits = 0;
331 unsigned long bits, i;
332 unsigned long next_zero;
333
334 i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
335 max_t(u64, *offset, bitmap_info->offset));
336 bits = bytes_to_bits(*bytes, block_group->sectorsize);
337
338 for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
339 i < BITS_PER_BITMAP;
340 i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
341 next_zero = find_next_zero_bit(bitmap_info->bitmap,
342 BITS_PER_BITMAP, i);
343 if ((next_zero - i) >= bits) {
344 found_bits = next_zero - i;
345 break;
346 }
347 i = next_zero;
348 }
349
350 if (found_bits) {
351 *offset = (u64)(i * block_group->sectorsize) +
352 bitmap_info->offset;
353 *bytes = (u64)(found_bits) * block_group->sectorsize;
354 return 0;
355 }
356
357 return -1;
358}
359
360static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
361 *block_group, u64 *offset,
362 u64 *bytes, int debug)
363{
364 struct btrfs_free_space *entry;
365 struct rb_node *node;
366 int ret;
367
368 if (!block_group->free_space_offset.rb_node)
369 return NULL;
370
371 entry = tree_search_offset(block_group,
372 offset_to_bitmap(block_group, *offset),
373 0, 1);
374 if (!entry)
375 return NULL;
376
377 for (node = &entry->offset_index; node; node = rb_next(node)) {
378 entry = rb_entry(node, struct btrfs_free_space, offset_index);
379 if (entry->bytes < *bytes)
380 continue;
381
382 if (entry->bitmap) {
383 ret = search_bitmap(block_group, entry, offset, bytes);
384 if (!ret)
385 return entry;
386 continue;
387 }
388
389 *offset = entry->offset;
390 *bytes = entry->bytes;
391 return entry;
392 }
393
394 return NULL;
395}
396
397static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
398 struct btrfs_free_space *info, u64 offset)
399{
400 u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
401 int max_bitmaps = (int)div64_u64(block_group->key.offset +
402 bytes_per_bg - 1, bytes_per_bg);
403 BUG_ON(block_group->total_bitmaps >= max_bitmaps);
404
405 info->offset = offset_to_bitmap(block_group, offset);
406 link_free_space(block_group, info);
407 block_group->total_bitmaps++;
408
409 recalculate_thresholds(block_group);
410}
411
412static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
413 struct btrfs_free_space *bitmap_info,
414 u64 *offset, u64 *bytes)
415{
416 u64 end;
417 u64 search_start, search_bytes;
418 int ret;
419
420again:
421 end = bitmap_info->offset +
422 (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
423
424 /*
425 * XXX - this can go away after a few releases.
426 *
427 * since the only user of btrfs_remove_free_space is the tree logging
428 * stuff, and the only way to test that is under crash conditions, we
429 * want to have this debug stuff here just in case something's not
430 * working. Search the bitmap for the space we are trying to use to
431 * make sure it's actually there. If it's not there then we need to stop
432 * because something has gone wrong.
433 */
434 search_start = *offset;
435 search_bytes = *bytes;
436 ret = search_bitmap(block_group, bitmap_info, &search_start,
437 &search_bytes);
438 BUG_ON(ret < 0 || search_start != *offset);
439
440 if (*offset > bitmap_info->offset && *offset + *bytes > end) {
441 bitmap_clear_bits(block_group, bitmap_info, *offset,
442 end - *offset + 1);
443 *bytes -= end - *offset + 1;
444 *offset = end + 1;
445 } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
446 bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
447 *bytes = 0;
448 }
449
450 if (*bytes) {
451 struct rb_node *next = rb_next(&bitmap_info->offset_index);
452 if (!bitmap_info->bytes) {
453 unlink_free_space(block_group, bitmap_info);
454 kfree(bitmap_info->bitmap);
455 kfree(bitmap_info);
456 block_group->total_bitmaps--;
457 recalculate_thresholds(block_group);
458 }
459
460 /*
461 * no entry after this bitmap, but we still have bytes to
462 * remove, so something has gone wrong.
463 */
464 if (!next)
465 return -EINVAL;
466
467 bitmap_info = rb_entry(next, struct btrfs_free_space,
468 offset_index);
469
470 /*
471 * if the next entry isn't a bitmap we need to return to let the
472 * extent stuff do its work.
473 */
474 if (!bitmap_info->bitmap)
475 return -EAGAIN;
476
477 /*
478 * Ok the next item is a bitmap, but it may not actually hold
479 * the information for the rest of this free space stuff, so
480 * look for it, and if we don't find it return so we can try
481 * everything over again.
482 */
483 search_start = *offset;
484 search_bytes = *bytes;
485 ret = search_bitmap(block_group, bitmap_info, &search_start,
486 &search_bytes);
487 if (ret < 0 || search_start != *offset)
488 return -EAGAIN;
489
490 goto again;
491 } else if (!bitmap_info->bytes) {
492 unlink_free_space(block_group, bitmap_info);
493 kfree(bitmap_info->bitmap);
494 kfree(bitmap_info);
495 block_group->total_bitmaps--;
496 recalculate_thresholds(block_group);
497 }
498
499 return 0;
500}
501
502static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
503 struct btrfs_free_space *info)
504{
505 struct btrfs_free_space *bitmap_info;
506 int added = 0;
507 u64 bytes, offset, end;
508 int ret;
509
510 /*
511 * If we are below the extents threshold then we can add this as an
512 * extent, and don't have to deal with the bitmap
513 */
514 if (block_group->free_extents < block_group->extents_thresh &&
515 info->bytes > block_group->sectorsize * 4)
516 return 0;
517
518 /*
519 * some block groups are so tiny they can't be enveloped by a bitmap, so
520 * don't even bother to create a bitmap for this
521 */
522 if (BITS_PER_BITMAP * block_group->sectorsize >
523 block_group->key.offset)
524 return 0;
525
526 bytes = info->bytes;
527 offset = info->offset;
528
529again:
530 bitmap_info = tree_search_offset(block_group,
531 offset_to_bitmap(block_group, offset),
532 1, 0);
533 if (!bitmap_info) {
534 BUG_ON(added);
535 goto new_bitmap;
536 }
537
538 end = bitmap_info->offset +
539 (u64)(BITS_PER_BITMAP * block_group->sectorsize);
540
541 if (offset >= bitmap_info->offset && offset + bytes > end) {
542 bitmap_set_bits(block_group, bitmap_info, offset,
543 end - offset);
544 bytes -= end - offset;
545 offset = end;
546 added = 0;
547 } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
548 bitmap_set_bits(block_group, bitmap_info, offset, bytes);
549 bytes = 0;
550 } else {
551 BUG();
552 }
553
554 if (!bytes) {
555 ret = 1;
556 goto out;
557 } else
558 goto again;
559
560new_bitmap:
561 if (info && info->bitmap) {
562 add_new_bitmap(block_group, info, offset);
563 added = 1;
564 info = NULL;
565 goto again;
566 } else {
567 spin_unlock(&block_group->tree_lock);
568
569 /* no pre-allocated info, allocate a new one */
570 if (!info) {
571 info = kzalloc(sizeof(struct btrfs_free_space),
572 GFP_NOFS);
573 if (!info) {
574 spin_lock(&block_group->tree_lock);
575 ret = -ENOMEM;
576 goto out;
577 }
578 }
579
580 /* allocate the bitmap */
581 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
582 spin_lock(&block_group->tree_lock);
583 if (!info->bitmap) {
584 ret = -ENOMEM;
585 goto out;
586 }
587 goto again;
588 }
589
590out:
591 if (info) {
592 if (info->bitmap)
593 kfree(info->bitmap);
594 kfree(info);
595 }
204 596
205 return ret; 597 return ret;
206} 598}
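recalculate_thresholds() above is pure budget arithmetic: allow MAX_CACHE_BYTES_PER_GIG (32KiB) of cache memory per 1GiB of block group, charge one page per bitmap already allocated, and convert whatever budget remains into a count of extent entries. A worked version, with 4KiB pages and a 48-byte entry both assumed (the real sizeof(struct btrfs_free_space) depends on the arch and config):

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL         /* assumed */
#define MAX_CACHE_BYTES_PER_GIG (32ULL * 1024)
#define ENTRY_SIZE 48ULL                /* assumed entry size */

int main(void)
{
	uint64_t bg_bytes = 1ULL << 30;  /* a 1GiB block group */
	uint64_t total_bitmaps = 2;      /* two bitmap pages in use */

	uint64_t max_bytes = MAX_CACHE_BYTES_PER_GIG * (bg_bytes >> 30);
	uint64_t bitmap_bytes = total_bitmaps * PAGE_CACHE_SIZE;
	uint64_t extent_bytes = max_bytes > bitmap_bytes ?
				max_bytes - bitmap_bytes : 0;

	/* budget: 32768 - 8192 = 24576 bytes -> 512 extent entries */
	printf("extents_thresh = %llu\n",
	       (unsigned long long)(extent_bytes / ENTRY_SIZE));
	return 0;
}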
@@ -208,8 +600,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
208int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 600int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
209 u64 offset, u64 bytes) 601 u64 offset, u64 bytes)
210{ 602{
211 struct btrfs_free_space *right_info; 603 struct btrfs_free_space *right_info = NULL;
212 struct btrfs_free_space *left_info; 604 struct btrfs_free_space *left_info = NULL;
213 struct btrfs_free_space *info = NULL; 605 struct btrfs_free_space *info = NULL;
214 int ret = 0; 606 int ret = 0;
215 607
@@ -227,18 +619,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
227 * are adding, if there is remove that struct and add a new one to 619 * are adding, if there is remove that struct and add a new one to
228 * cover the entire range 620 * cover the entire range
229 */ 621 */
230 right_info = tree_search_offset(&block_group->free_space_offset, 622 right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
231 offset+bytes, 0, 0); 623 if (right_info && rb_prev(&right_info->offset_index))
232 left_info = tree_search_offset(&block_group->free_space_offset, 624 left_info = rb_entry(rb_prev(&right_info->offset_index),
233 offset-1, 0, 1); 625 struct btrfs_free_space, offset_index);
626 else
627 left_info = tree_search_offset(block_group, offset - 1, 0, 0);
628
629 /*
630 * If there was no extent directly to the left or right of this new
631 * extent then we know we're going to have to allocate a new extent, so
632 * before we do that see if we need to drop this into a bitmap
633 */
634 if ((!left_info || left_info->bitmap) &&
635 (!right_info || right_info->bitmap)) {
636 ret = insert_into_bitmap(block_group, info);
637
638 if (ret < 0) {
639 goto out;
640 } else if (ret) {
641 ret = 0;
642 goto out;
643 }
644 }
234 645
235 if (right_info) { 646 if (right_info && !right_info->bitmap) {
236 unlink_free_space(block_group, right_info); 647 unlink_free_space(block_group, right_info);
237 info->bytes += right_info->bytes; 648 info->bytes += right_info->bytes;
238 kfree(right_info); 649 kfree(right_info);
239 } 650 }
240 651
241 if (left_info && left_info->offset + left_info->bytes == offset) { 652 if (left_info && !left_info->bitmap &&
653 left_info->offset + left_info->bytes == offset) {
242 unlink_free_space(block_group, left_info); 654 unlink_free_space(block_group, left_info);
243 info->offset = left_info->offset; 655 info->offset = left_info->offset;
244 info->bytes += left_info->bytes; 656 info->bytes += left_info->bytes;
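The coalescing above only happens with plain extent neighbours; bitmap neighbours are skipped, which is why the !bitmap checks were added. Concretely, freeing [8192, +4096) between existing extents [4096, +4096) and [12288, +4096) should collapse all three into a single [4096, +12288) entry. A compact model of the two adjacency checks:

#include <stdint.h>
#include <stdio.h>

struct toy_extent { uint64_t offset, bytes; };

/* merge a freed range with exact-adjacent neighbours, as
 * btrfs_add_free_space() does for non-bitmap entries */
static struct toy_extent toy_merge(struct toy_extent freed,
				   const struct toy_extent *left,
				   const struct toy_extent *right)
{
	if (right && right->offset == freed.offset + freed.bytes)
		freed.bytes += right->bytes;           /* absorb right */
	if (left && left->offset + left->bytes == freed.offset) {
		freed.offset = left->offset;           /* extend back left */
		freed.bytes += left->bytes;
	}
	return freed;
}

int main(void)
{
	struct toy_extent l = { 4096, 4096 }, r = { 12288, 4096 };
	struct toy_extent f =
		toy_merge((struct toy_extent){ 8192, 4096 }, &l, &r);

	printf("[%llu, +%llu)\n", (unsigned long long)f.offset,
	       (unsigned long long)f.bytes);           /* [4096, +12288) */
	return 0;
}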
@@ -248,11 +660,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
248 ret = link_free_space(block_group, info); 660 ret = link_free_space(block_group, info);
249 if (ret) 661 if (ret)
250 kfree(info); 662 kfree(info);
251 663out:
252 spin_unlock(&block_group->tree_lock); 664 spin_unlock(&block_group->tree_lock);
253 665
254 if (ret) { 666 if (ret) {
255 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); 667 printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
256 BUG_ON(ret == -EEXIST); 668 BUG_ON(ret == -EEXIST);
257 } 669 }
258 670
@@ -263,40 +675,74 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
263 u64 offset, u64 bytes) 675 u64 offset, u64 bytes)
264{ 676{
265 struct btrfs_free_space *info; 677 struct btrfs_free_space *info;
678 struct btrfs_free_space *next_info = NULL;
266 int ret = 0; 679 int ret = 0;
267 680
268 spin_lock(&block_group->tree_lock); 681 spin_lock(&block_group->tree_lock);
269 682
270 info = tree_search_offset(&block_group->free_space_offset, offset, 0, 683again:
271 1); 684 info = tree_search_offset(block_group, offset, 0, 0);
272 if (info && info->offset == offset) { 685 if (!info) {
273 if (info->bytes < bytes) { 686 /*
274 printk(KERN_ERR "Found free space at %llu, size %llu," 687 * oops, we didn't find an extent that matched the space we wanted
275 "trying to use %llu\n", 688 * to remove, so look for a bitmap instead
276 (unsigned long long)info->offset, 689 */
277 (unsigned long long)info->bytes, 690 info = tree_search_offset(block_group,
278 (unsigned long long)bytes); 691 offset_to_bitmap(block_group, offset),
692 1, 0);
693 if (!info) {
694 WARN_ON(1);
695 goto out_lock;
696 }
697 }
698
699 if (info->bytes < bytes && rb_next(&info->offset_index)) {
700 u64 end;
701 next_info = rb_entry(rb_next(&info->offset_index),
702 struct btrfs_free_space,
703 offset_index);
704
705 if (next_info->bitmap)
706 end = next_info->offset + BITS_PER_BITMAP *
707 block_group->sectorsize - 1;
708 else
709 end = next_info->offset + next_info->bytes;
710
711 if (next_info->bytes < bytes ||
712 next_info->offset > offset || offset > end) {
713 printk(KERN_CRIT "Found free space at %llu, size %llu,"
714 " trying to use %llu\n",
715 (unsigned long long)info->offset,
716 (unsigned long long)info->bytes,
717 (unsigned long long)bytes);
279 WARN_ON(1); 718 WARN_ON(1);
280 ret = -EINVAL; 719 ret = -EINVAL;
281 spin_unlock(&block_group->tree_lock); 720 goto out_lock;
282 goto out;
283 } 721 }
284 unlink_free_space(block_group, info);
285 722
286 if (info->bytes == bytes) { 723 info = next_info;
287 kfree(info); 724 }
288 spin_unlock(&block_group->tree_lock); 725
289 goto out; 726 if (info->bytes == bytes) {
727 unlink_free_space(block_group, info);
728 if (info->bitmap) {
729 kfree(info->bitmap);
730 block_group->total_bitmaps--;
290 } 731 }
732 kfree(info);
733 goto out_lock;
734 }
291 735
736 if (!info->bitmap && info->offset == offset) {
737 unlink_free_space(block_group, info);
292 info->offset += bytes; 738 info->offset += bytes;
293 info->bytes -= bytes; 739 info->bytes -= bytes;
740 link_free_space(block_group, info);
741 goto out_lock;
742 }
294 743
295 ret = link_free_space(block_group, info); 744 if (!info->bitmap && info->offset <= offset &&
296 spin_unlock(&block_group->tree_lock); 745 info->offset + info->bytes >= offset + bytes) {
297 BUG_ON(ret);
298 } else if (info && info->offset < offset &&
299 info->offset + info->bytes >= offset + bytes) {
300 u64 old_start = info->offset; 746 u64 old_start = info->offset;
301 /* 747 /*
302 * we're freeing space in the middle of the info, 748 * we're freeing space in the middle of the info,
@@ -312,7 +758,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
312 info->offset = offset + bytes; 758 info->offset = offset + bytes;
313 info->bytes = old_end - info->offset; 759 info->bytes = old_end - info->offset;
314 ret = link_free_space(block_group, info); 760 ret = link_free_space(block_group, info);
315 BUG_ON(ret); 761 WARN_ON(ret);
762 if (ret)
763 goto out_lock;
316 } else { 764 } else {
317 /* the hole we're creating ends at the end 765 /* the hole we're creating ends at the end
318 * of the info struct, just free the info 766 * of the info struct, just free the info
@@ -320,32 +768,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
320 kfree(info); 768 kfree(info);
321 } 769 }
322 spin_unlock(&block_group->tree_lock); 770 spin_unlock(&block_group->tree_lock);
323 /* step two, insert a new info struct to cover anything 771
324 * before the hole 772 /* step two, insert a new info struct to cover
773 * anything before the hole
325 */ 774 */
326 ret = btrfs_add_free_space(block_group, old_start, 775 ret = btrfs_add_free_space(block_group, old_start,
327 offset - old_start); 776 offset - old_start);
328 BUG_ON(ret); 777 WARN_ON(ret);
329 } else { 778 goto out;
330 spin_unlock(&block_group->tree_lock);
331 if (!info) {
332 printk(KERN_ERR "couldn't find space %llu to free\n",
333 (unsigned long long)offset);
334 printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
335 block_group->cached,
336 (unsigned long long)block_group->key.objectid,
337 (unsigned long long)block_group->key.offset);
338 btrfs_dump_free_space(block_group, bytes);
339 } else if (info) {
340 printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
341 "but wanted offset=%llu bytes=%llu\n",
342 (unsigned long long)info->offset,
343 (unsigned long long)info->bytes,
344 (unsigned long long)offset,
345 (unsigned long long)bytes);
346 }
347 WARN_ON(1);
348 } 779 }
780
781 ret = remove_from_bitmap(block_group, info, &offset, &bytes);
782 if (ret == -EAGAIN)
783 goto again;
784 BUG_ON(ret);
785out_lock:
786 spin_unlock(&block_group->tree_lock);
349out: 787out:
350 return ret; 788 return ret;
351} 789}
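For the middle-of-an-extent case above, the removal is a split: the tail past the hole is re-linked while the tree lock is held, then the lock is dropped and the head before the hole is pushed back through btrfs_add_free_space(). For example, removing [20480, +4096) from the extent [16384, +16384) leaves a tail [24576, +8192) and a head [16384, +4096). A compact model of the split arithmetic:

#include <stdint.h>
#include <stdio.h>

struct toy_extent { uint64_t offset, bytes; };

/* split 'info' around the removed range [offset, offset + bytes),
 * assuming the range lies fully inside the extent */
static void toy_split(struct toy_extent info, uint64_t offset, uint64_t bytes,
		      struct toy_extent *head, struct toy_extent *tail)
{
	uint64_t old_end = info.offset + info.bytes;

	tail->offset = offset + bytes;      /* re-linked under the lock */
	tail->bytes = old_end - tail->offset;
	head->offset = info.offset;         /* re-added via the normal path */
	head->bytes = offset - info.offset;
}

int main(void)
{
	struct toy_extent head, tail;

	toy_split((struct toy_extent){ 16384, 16384 }, 20480, 4096,
		  &head, &tail);
	printf("head [%llu, +%llu) tail [%llu, +%llu)\n",
	       (unsigned long long)head.offset, (unsigned long long)head.bytes,
	       (unsigned long long)tail.offset, (unsigned long long)tail.bytes);
	return 0;
}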
@@ -361,10 +799,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
361 info = rb_entry(n, struct btrfs_free_space, offset_index); 799 info = rb_entry(n, struct btrfs_free_space, offset_index);
362 if (info->bytes >= bytes) 800 if (info->bytes >= bytes)
363 count++; 801 count++;
364 printk(KERN_ERR "entry offset %llu, bytes %llu\n", 802 printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
365 (unsigned long long)info->offset, 803 (unsigned long long)info->offset,
366 (unsigned long long)info->bytes); 804 (unsigned long long)info->bytes,
805 (info->bitmap) ? "yes" : "no");
367 } 806 }
807 printk(KERN_INFO "block group has cluster?: %s\n",
808 list_empty(&block_group->cluster_list) ? "no" : "yes");
368 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" 809 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
369 "\n", count); 810 "\n", count);
370} 811}
@@ -397,26 +838,35 @@ __btrfs_return_cluster_to_free_space(
397{ 838{
398 struct btrfs_free_space *entry; 839 struct btrfs_free_space *entry;
399 struct rb_node *node; 840 struct rb_node *node;
841 bool bitmap;
400 842
401 spin_lock(&cluster->lock); 843 spin_lock(&cluster->lock);
402 if (cluster->block_group != block_group) 844 if (cluster->block_group != block_group)
403 goto out; 845 goto out;
404 846
847 bitmap = cluster->points_to_bitmap;
848 cluster->block_group = NULL;
405 cluster->window_start = 0; 849 cluster->window_start = 0;
850 list_del_init(&cluster->block_group_list);
851 cluster->points_to_bitmap = false;
852
853 if (bitmap)
854 goto out;
855
406 node = rb_first(&cluster->root); 856 node = rb_first(&cluster->root);
407 while(node) { 857 while (node) {
408 entry = rb_entry(node, struct btrfs_free_space, offset_index); 858 entry = rb_entry(node, struct btrfs_free_space, offset_index);
409 node = rb_next(&entry->offset_index); 859 node = rb_next(&entry->offset_index);
410 rb_erase(&entry->offset_index, &cluster->root); 860 rb_erase(&entry->offset_index, &cluster->root);
411 link_free_space(block_group, entry); 861 BUG_ON(entry->bitmap);
862 tree_insert_offset(&block_group->free_space_offset,
863 entry->offset, &entry->offset_index, 0);
412 } 864 }
413 list_del_init(&cluster->block_group_list);
414
415 btrfs_put_block_group(cluster->block_group);
416 cluster->block_group = NULL;
417 cluster->root.rb_node = NULL; 865 cluster->root.rb_node = NULL;
866
418out: 867out:
419 spin_unlock(&cluster->lock); 868 spin_unlock(&cluster->lock);
869 btrfs_put_block_group(block_group);
420 return 0; 870 return 0;
421} 871}
422 872
@@ -425,20 +875,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
425 struct btrfs_free_space *info; 875 struct btrfs_free_space *info;
426 struct rb_node *node; 876 struct rb_node *node;
427 struct btrfs_free_cluster *cluster; 877 struct btrfs_free_cluster *cluster;
428 struct btrfs_free_cluster *safe; 878 struct list_head *head;
429 879
430 spin_lock(&block_group->tree_lock); 880 spin_lock(&block_group->tree_lock);
431 881 while ((head = block_group->cluster_list.next) !=
432 list_for_each_entry_safe(cluster, safe, &block_group->cluster_list, 882 &block_group->cluster_list) {
433 block_group_list) { 883 cluster = list_entry(head, struct btrfs_free_cluster,
884 block_group_list);
434 885
435 WARN_ON(cluster->block_group != block_group); 886 WARN_ON(cluster->block_group != block_group);
436 __btrfs_return_cluster_to_free_space(block_group, cluster); 887 __btrfs_return_cluster_to_free_space(block_group, cluster);
888 if (need_resched()) {
889 spin_unlock(&block_group->tree_lock);
890 cond_resched();
891 spin_lock(&block_group->tree_lock);
892 }
437 } 893 }
438 894
439 while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { 895 while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
440 info = rb_entry(node, struct btrfs_free_space, bytes_index); 896 info = rb_entry(node, struct btrfs_free_space, offset_index);
441 unlink_free_space(block_group, info); 897 unlink_free_space(block_group, info);
898 if (info->bitmap)
899 kfree(info->bitmap);
442 kfree(info); 900 kfree(info);
443 if (need_resched()) { 901 if (need_resched()) {
444 spin_unlock(&block_group->tree_lock); 902 spin_unlock(&block_group->tree_lock);
@@ -446,6 +904,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
446 spin_lock(&block_group->tree_lock); 904 spin_lock(&block_group->tree_lock);
447 } 905 }
448 } 906 }
907
449 spin_unlock(&block_group->tree_lock); 908 spin_unlock(&block_group->tree_lock);
450} 909}
451 910
@@ -453,25 +912,35 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
453 u64 offset, u64 bytes, u64 empty_size) 912 u64 offset, u64 bytes, u64 empty_size)
454{ 913{
455 struct btrfs_free_space *entry = NULL; 914 struct btrfs_free_space *entry = NULL;
915 u64 bytes_search = bytes + empty_size;
456 u64 ret = 0; 916 u64 ret = 0;
457 917
458 spin_lock(&block_group->tree_lock); 918 spin_lock(&block_group->tree_lock);
459 entry = tree_search_offset(&block_group->free_space_offset, offset, 919 entry = find_free_space(block_group, &offset, &bytes_search, 0);
460 bytes + empty_size, 1);
461 if (!entry) 920 if (!entry)
462 entry = tree_search_bytes(&block_group->free_space_bytes, 921 goto out;
463 offset, bytes + empty_size); 922
464 if (entry) { 923 ret = offset;
924 if (entry->bitmap) {
925 bitmap_clear_bits(block_group, entry, offset, bytes);
926 if (!entry->bytes) {
927 unlink_free_space(block_group, entry);
928 kfree(entry->bitmap);
929 kfree(entry);
930 block_group->total_bitmaps--;
931 recalculate_thresholds(block_group);
932 }
933 } else {
465 unlink_free_space(block_group, entry); 934 unlink_free_space(block_group, entry);
466 ret = entry->offset;
467 entry->offset += bytes; 935 entry->offset += bytes;
468 entry->bytes -= bytes; 936 entry->bytes -= bytes;
469
470 if (!entry->bytes) 937 if (!entry->bytes)
471 kfree(entry); 938 kfree(entry);
472 else 939 else
473 link_free_space(block_group, entry); 940 link_free_space(block_group, entry);
474 } 941 }
942
943out:
475 spin_unlock(&block_group->tree_lock); 944 spin_unlock(&block_group->tree_lock);
476 945
477 return ret; 946 return ret;
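Allocating from a bitmap entry above reduces to finding a run of at least bytes_to_bits(bytes) consecutive set bits at or after the requested bit, then clearing them; that is what search_bitmap() does with the find_next_bit()/find_next_zero_bit() pair. A self-contained run finder over a byte array, standing in for those kernel helpers:

#include <stdio.h>

#define NBITS 64

static int test_bit(const unsigned char *bm, int i)
{
	return (bm[i / 8] >> (i % 8)) & 1;
}

/* find the first run of at least 'want' set bits starting at or
 * after 'from'; returns the start bit or -1 (cf. search_bitmap) */
static int find_run(const unsigned char *bm, int from, int want)
{
	int i = from;

	while (i < NBITS) {
		while (i < NBITS && !test_bit(bm, i))
			i++;                 /* find_next_bit */
		int start = i;
		while (i < NBITS && test_bit(bm, i))
			i++;                 /* run ends at the next zero bit */
		if (i - start >= want)
			return start;
	}
	return -1;
}

int main(void)
{
	unsigned char bm[NBITS / 8] = { 0 };
	int i;

	for (i = 10; i < 14; i++)            /* set bits 10..13 */
		bm[i / 8] |= 1 << (i % 8);
	printf("run of 3 starts at bit %d\n", find_run(bm, 0, 3)); /* 10 */
	return 0;
}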
@@ -517,6 +986,54 @@ int btrfs_return_cluster_to_free_space(
517 return ret; 986 return ret;
518} 987}
519 988
989static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
990 struct btrfs_free_cluster *cluster,
991 u64 bytes, u64 min_start)
992{
993 struct btrfs_free_space *entry;
994 int err;
995 u64 search_start = cluster->window_start;
996 u64 search_bytes = bytes;
997 u64 ret = 0;
998
999 spin_lock(&block_group->tree_lock);
1000 spin_lock(&cluster->lock);
1001
1002 if (!cluster->points_to_bitmap)
1003 goto out;
1004
1005 if (cluster->block_group != block_group)
1006 goto out;
1007
1008 /*
1009 * search_start is the beginning of the bitmap, but at some point it may
1010 * be a good idea to point to the actual start of the free area in the
1011 * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
1012 * to 1 to make sure we get the bitmap entry
1013 */
1014 entry = tree_search_offset(block_group,
1015 offset_to_bitmap(block_group, search_start),
1016 1, 0);
1017 if (!entry || !entry->bitmap)
1018 goto out;
1019
1020 search_start = min_start;
1021 search_bytes = bytes;
1022
1023 err = search_bitmap(block_group, entry, &search_start,
1024 &search_bytes);
1025 if (err)
1026 goto out;
1027
1028 ret = search_start;
1029 bitmap_clear_bits(block_group, entry, ret, bytes);
1030out:
1031 spin_unlock(&cluster->lock);
1032 spin_unlock(&block_group->tree_lock);
1033
1034 return ret;
1035}
1036
520/* 1037/*
521 * given a cluster, try to allocate 'bytes' from it, returns 0 1038 * given a cluster, try to allocate 'bytes' from it, returns 0
522 * if it couldn't find anything suitably large, or a logical disk offset 1039 * if it couldn't find anything suitably large, or a logical disk offset
@@ -530,6 +1047,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
530 struct rb_node *node; 1047 struct rb_node *node;
531 u64 ret = 0; 1048 u64 ret = 0;
532 1049
1050 if (cluster->points_to_bitmap)
1051 return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
1052 min_start);
1053
533 spin_lock(&cluster->lock); 1054 spin_lock(&cluster->lock);
534 if (bytes > cluster->max_size) 1055 if (bytes > cluster->max_size)
535 goto out; 1056 goto out;
@@ -567,9 +1088,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
567 } 1088 }
568out: 1089out:
569 spin_unlock(&cluster->lock); 1090 spin_unlock(&cluster->lock);
1091
570 return ret; 1092 return ret;
571} 1093}
572 1094
1095static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
1096 struct btrfs_free_space *entry,
1097 struct btrfs_free_cluster *cluster,
1098 u64 offset, u64 bytes, u64 min_bytes)
1099{
1100 unsigned long next_zero;
1101 unsigned long i;
1102 unsigned long search_bits;
1103 unsigned long total_bits;
1104 unsigned long found_bits;
1105 unsigned long start = 0;
1106 unsigned long total_found = 0;
1107 bool found = false;
1108
1109 i = offset_to_bit(entry->offset, block_group->sectorsize,
1110 max_t(u64, offset, entry->offset));
1111 search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
1112 total_bits = bytes_to_bits(bytes, block_group->sectorsize);
1113
1114again:
1115 found_bits = 0;
1116 for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
1117 i < BITS_PER_BITMAP;
1118 i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
1119 next_zero = find_next_zero_bit(entry->bitmap,
1120 BITS_PER_BITMAP, i);
1121 if (next_zero - i >= search_bits) {
1122 found_bits = next_zero - i;
1123 break;
1124 }
1125 i = next_zero;
1126 }
1127
1128 if (!found_bits)
1129 return -1;
1130
1131 if (!found) {
1132 start = i;
1133 found = true;
1134 }
1135
1136 total_found += found_bits;
1137
1138 if (cluster->max_size < found_bits * block_group->sectorsize)
1139 cluster->max_size = found_bits * block_group->sectorsize;
1140
1141 if (total_found < total_bits) {
1142 i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
1143 if (i - start > total_bits * 2) {
1144 total_found = 0;
1145 cluster->max_size = 0;
1146 found = false;
1147 }
1148 goto again;
1149 }
1150
1151 cluster->window_start = start * block_group->sectorsize +
1152 entry->offset;
1153 cluster->points_to_bitmap = true;
1154
1155 return 0;
1156}
1157
573/* 1158/*
574 * here we try to find a cluster of blocks in a block group. The goal 1159 * here we try to find a cluster of blocks in a block group. The goal
575 * is to find at least bytes free and up to empty_size + bytes free. 1160 * is to find at least bytes free and up to empty_size + bytes free.
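btrfs_bitmap_cluster() above loosens the single-run requirement: it accumulates several runs, each worth at least min_bytes of bits, until they cover the requested size, and resets the window whenever the scan drifts more than twice the target width past the first run. A simplified userspace model of that accumulate-and-reset loop (it folds the kernel's goto again into one loop, so it is a sketch of the idea rather than a line-for-line port):

#include <stdio.h>

#define NBITS 128

static int test_bit(const unsigned char *bm, int i)
{
	return (bm[i / 8] >> (i % 8)) & 1;
}

/* accumulate runs of at least min_bits set bits until they total
 * want_bits; reset the window when it spreads past 2 * want_bits.
 * Returns the window start bit, or -1. */
static int find_window(const unsigned char *bm, int min_bits, int want_bits)
{
	int i = 0, start = -1, total = 0;

	while (i < NBITS) {
		while (i < NBITS && !test_bit(bm, i))
			i++;                 /* find_next_bit */
		int run_start = i;
		while (i < NBITS && test_bit(bm, i))
			i++;                 /* run ends at a zero bit */
		int run = i - run_start;
		if (run < min_bits)
			continue;            /* fragment too small, skip it */
		if (start < 0)
			start = run_start;   /* window opens at first good run */
		total += run;
		if (total >= want_bits)
			return start;        /* becomes window_start */
		if (i - start > 2 * want_bits) {
			start = -1;          /* drifted too far, reset */
			total = 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned char bm[NBITS / 8] = { 0 };
	int i;

	/* two 8-bit runs close together: bits 16..23 and 28..35 */
	for (i = 16; i < 24; i++)
		bm[i / 8] |= 1 << (i % 8);
	for (i = 28; i < 36; i++)
		bm[i / 8] |= 1 << (i % 8);

	printf("window covering 16 bits starts at bit %d\n",
	       find_window(bm, 4, 16));      /* 16 */
	return 0;
}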
@@ -587,12 +1172,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
587 struct btrfs_free_space *entry = NULL; 1172 struct btrfs_free_space *entry = NULL;
588 struct rb_node *node; 1173 struct rb_node *node;
589 struct btrfs_free_space *next; 1174 struct btrfs_free_space *next;
590 struct btrfs_free_space *last; 1175 struct btrfs_free_space *last = NULL;
591 u64 min_bytes; 1176 u64 min_bytes;
592 u64 window_start; 1177 u64 window_start;
593 u64 window_free; 1178 u64 window_free;
594 u64 max_extent = 0; 1179 u64 max_extent = 0;
595 int total_retries = 0; 1180 bool found_bitmap = false;
596 int ret; 1181 int ret;
597 1182
598 /* for metadata, allow allocates with more holes */ 1183 /* for metadata, allow allocates with more holes */
@@ -620,31 +1205,80 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
620 goto out; 1205 goto out;
621 } 1206 }
622again: 1207again:
623 min_bytes = min(min_bytes, bytes + empty_size); 1208 entry = tree_search_offset(block_group, offset, found_bitmap, 1);
624 entry = tree_search_bytes(&block_group->free_space_bytes,
625 offset, min_bytes);
626 if (!entry) { 1209 if (!entry) {
627 ret = -ENOSPC; 1210 ret = -ENOSPC;
628 goto out; 1211 goto out;
629 } 1212 }
1213
1214 /*
1215 * If found_bitmap is true, we exhausted our search for extent entries,
1216 * and we just want to search all of the bitmaps that we can find, and
1217 * ignore any extent entries we find.
1218 */
1219 while (entry->bitmap || found_bitmap ||
1220 (!entry->bitmap && entry->bytes < min_bytes)) {
1221 struct rb_node *node = rb_next(&entry->offset_index);
1222
1223 if (entry->bitmap && entry->bytes > bytes + empty_size) {
1224 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
1225 offset, bytes + empty_size,
1226 min_bytes);
1227 if (!ret)
1228 goto got_it;
1229 }
1230
1231 if (!node) {
1232 ret = -ENOSPC;
1233 goto out;
1234 }
1235 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1236 }
1237
1238 /*
1239 * We already searched all the extent entries from the passed-in offset
1240 * to the end and didn't find enough space for the cluster, and we also
1241 * didn't find any bitmaps that met our criteria, so just go ahead and exit
1242 */
1243 if (found_bitmap) {
1244 ret = -ENOSPC;
1245 goto out;
1246 }
1247
1248 cluster->points_to_bitmap = false;
630 window_start = entry->offset; 1249 window_start = entry->offset;
631 window_free = entry->bytes; 1250 window_free = entry->bytes;
632 last = entry; 1251 last = entry;
633 max_extent = entry->bytes; 1252 max_extent = entry->bytes;
634 1253
635 while(1) { 1254 while (1) {
636 /* our window is just right, let's fill it */ 1255 /* our window is just right, let's fill it */
637 if (window_free >= bytes + empty_size) 1256 if (window_free >= bytes + empty_size)
638 break; 1257 break;
639 1258
640 node = rb_next(&last->offset_index); 1259 node = rb_next(&last->offset_index);
641 if (!node) { 1260 if (!node) {
1261 if (found_bitmap)
1262 goto again;
642 ret = -ENOSPC; 1263 ret = -ENOSPC;
643 goto out; 1264 goto out;
644 } 1265 }
645 next = rb_entry(node, struct btrfs_free_space, offset_index); 1266 next = rb_entry(node, struct btrfs_free_space, offset_index);
646 1267
647 /* 1268 /*
1269 * we found a bitmap, so if this search doesn't result in a
1270 * cluster, we know to go and search again for the bitmaps and
1271 * start looking for space there
1272 */
1273 if (next->bitmap) {
1274 if (!found_bitmap)
1275 offset = next->offset;
1276 found_bitmap = true;
1277 last = next;
1278 continue;
1279 }
1280
1281 /*
648 * we haven't filled the empty size and the window is 1282 * we haven't filled the empty size and the window is
649 * very large. reset and try again 1283 * very large. reset and try again
650 */ 1284 */
@@ -655,19 +1289,6 @@ again:
655 window_free = entry->bytes; 1289 window_free = entry->bytes;
656 last = entry; 1290 last = entry;
657 max_extent = 0; 1291 max_extent = 0;
658 total_retries++;
659 if (total_retries % 64 == 0) {
660 if (min_bytes >= (bytes + empty_size)) {
661 ret = -ENOSPC;
662 goto out;
663 }
664 /*
665 * grow our allocation a bit, we're not having
666 * much luck
667 */
668 min_bytes *= 2;
669 goto again;
670 }
671 } else { 1292 } else {
672 last = next; 1293 last = next;
673 window_free += next->bytes; 1294 window_free += next->bytes;
@@ -685,11 +1306,19 @@ again:
685 * The cluster includes an rbtree, but only uses the offset index 1306 * The cluster includes an rbtree, but only uses the offset index
686 * of each free space cache entry. 1307 * of each free space cache entry.
687 */ 1308 */
688 while(1) { 1309 while (1) {
689 node = rb_next(&entry->offset_index); 1310 node = rb_next(&entry->offset_index);
690 unlink_free_space(block_group, entry); 1311 if (entry->bitmap && node) {
1312 entry = rb_entry(node, struct btrfs_free_space,
1313 offset_index);
1314 continue;
1315 } else if (entry->bitmap && !node) {
1316 break;
1317 }
1318
1319 rb_erase(&entry->offset_index, &block_group->free_space_offset);
691 ret = tree_insert_offset(&cluster->root, entry->offset, 1320 ret = tree_insert_offset(&cluster->root, entry->offset,
692 &entry->offset_index); 1321 &entry->offset_index, 0);
693 BUG_ON(ret); 1322 BUG_ON(ret);
694 1323
695 if (!node || entry == last) 1324 if (!node || entry == last)
@@ -697,8 +1326,10 @@ again:
697 1326
698 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1327 entry = rb_entry(node, struct btrfs_free_space, offset_index);
699 } 1328 }
700 ret = 0; 1329
701 cluster->max_size = max_extent; 1330 cluster->max_size = max_extent;
1331got_it:
1332 ret = 0;
702 atomic_inc(&block_group->count); 1333 atomic_inc(&block_group->count);
703 list_add_tail(&cluster->block_group_list, &block_group->cluster_list); 1334 list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
704 cluster->block_group = block_group; 1335 cluster->block_group = block_group;
@@ -718,6 +1349,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
718 spin_lock_init(&cluster->refill_lock); 1349 spin_lock_init(&cluster->refill_lock);
719 cluster->root.rb_node = NULL; 1350 cluster->root.rb_node = NULL;
720 cluster->max_size = 0; 1351 cluster->max_size = 0;
1352 cluster->points_to_bitmap = false;
721 INIT_LIST_HEAD(&cluster->block_group_list); 1353 INIT_LIST_HEAD(&cluster->block_group_list);
722 cluster->block_group = NULL; 1354 cluster->block_group = NULL;
723} 1355}
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 266fb876405..890a8e79011 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -19,6 +19,14 @@
19#ifndef __BTRFS_FREE_SPACE_CACHE 19#ifndef __BTRFS_FREE_SPACE_CACHE
20#define __BTRFS_FREE_SPACE_CACHE 20#define __BTRFS_FREE_SPACE_CACHE
21 21
22struct btrfs_free_space {
23 struct rb_node offset_index;
24 u64 offset;
25 u64 bytes;
26 unsigned long *bitmap;
27 struct list_head list;
28};
29
22int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 30int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
23 u64 bytenr, u64 size); 31 u64 bytenr, u64 size);
24int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 32int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
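The struct moved into the header above now plays two roles, discriminated by the bitmap pointer: NULL means offset/bytes describe one contiguous free extent, while a non-NULL bitmap points at a page-sized bit array tracking sector-granular free space under this offset, with bytes caching the total that is set. A toy reading of that convention (illustrative types, not the kernel's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_free_space {
	uint64_t offset;
	uint64_t bytes;        /* extent length, or total free in the bitmap */
	unsigned long *bitmap; /* NULL => plain extent entry */
};

static void toy_describe(const struct toy_free_space *e)
{
	if (!e->bitmap)
		printf("extent [%llu, +%llu)\n",
		       (unsigned long long)e->offset,
		       (unsigned long long)e->bytes);
	else
		printf("bitmap at %llu tracking %llu free bytes\n",
		       (unsigned long long)e->offset,
		       (unsigned long long)e->bytes);
}

int main(void)
{
	unsigned long bits = 0x7; /* say, three free sectors */
	struct toy_free_space ext = { 4096, 8192, NULL };
	struct toy_free_space bmp = { 0, 3 * 4096, &bits };

	toy_describe(&ext);
	toy_describe(&bmp);
	return 0;
}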
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dbe1aabf96c..272b9b2bea8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -26,7 +26,6 @@
26#include <linux/time.h> 26#include <linux/time.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/smp_lock.h>
30#include <linux/backing-dev.h> 29#include <linux/backing-dev.h>
31#include <linux/mpage.h> 30#include <linux/mpage.h>
32#include <linux/swap.h> 31#include <linux/swap.h>
@@ -2604,8 +2603,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2604 if (root->ref_cows) 2603 if (root->ref_cows)
2605 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); 2604 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2606 path = btrfs_alloc_path(); 2605 path = btrfs_alloc_path();
2607 path->reada = -1;
2608 BUG_ON(!path); 2606 BUG_ON(!path);
2607 path->reada = -1;
2609 2608
2610 /* FIXME, add redo link to tree so we don't leak on crash */ 2609 /* FIXME, add redo link to tree so we don't leak on crash */
2611 key.objectid = inode->i_ino; 2610 key.objectid = inode->i_ino;
@@ -3580,12 +3579,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3580 owner = 1; 3579 owner = 1;
3581 BTRFS_I(inode)->block_group = 3580 BTRFS_I(inode)->block_group =
3582 btrfs_find_block_group(root, 0, alloc_hint, owner); 3581 btrfs_find_block_group(root, 0, alloc_hint, owner);
3583 if ((mode & S_IFREG)) {
3584 if (btrfs_test_opt(root, NODATASUM))
3585 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
3586 if (btrfs_test_opt(root, NODATACOW))
3587 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
3588 }
3589 3582
3590 key[0].objectid = objectid; 3583 key[0].objectid = objectid;
3591 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); 3584 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -3640,6 +3633,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3640 3633
3641 btrfs_inherit_iflags(inode, dir); 3634 btrfs_inherit_iflags(inode, dir);
3642 3635
3636 if ((mode & S_IFREG)) {
3637 if (btrfs_test_opt(root, NODATASUM))
3638 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
3639 if (btrfs_test_opt(root, NODATACOW))
3640 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
3641 }
3642
3643 insert_inode_hash(inode); 3643 insert_inode_hash(inode);
3644 inode_tree_add(inode); 3644 inode_tree_add(inode);
3645 return inode; 3645 return inode;
@@ -4785,8 +4785,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4785 * and the replacement file is large. Start IO on it now so 4785 * and the replacement file is large. Start IO on it now so
4786 * we don't add too much work to the end of the transaction 4786 * we don't add too much work to the end of the transaction
4787 */ 4787 */
4788 if (new_inode && old_inode && S_ISREG(old_inode->i_mode) && 4788 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
4789 new_inode->i_size &&
4790 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) 4789 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
4791 filemap_flush(old_inode->i_mapping); 4790 filemap_flush(old_inode->i_mapping);
4792 4791
@@ -5082,6 +5081,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5082 u64 mask = BTRFS_I(inode)->root->sectorsize - 1; 5081 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5083 struct extent_map *em; 5082 struct extent_map *em;
5084 struct btrfs_trans_handle *trans; 5083 struct btrfs_trans_handle *trans;
5084 struct btrfs_root *root;
5085 int ret; 5085 int ret;
5086 5086
5087 alloc_start = offset & ~mask; 5087 alloc_start = offset & ~mask;
@@ -5100,6 +5100,13 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5100 goto out; 5100 goto out;
5101 } 5101 }
5102 5102
5103 root = BTRFS_I(inode)->root;
5104
5105 ret = btrfs_check_data_free_space(root, inode,
5106 alloc_end - alloc_start);
5107 if (ret)
5108 goto out;
5109
5103 locked_end = alloc_end - 1; 5110 locked_end = alloc_end - 1;
5104 while (1) { 5111 while (1) {
5105 struct btrfs_ordered_extent *ordered; 5112 struct btrfs_ordered_extent *ordered;
@@ -5107,7 +5114,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5107 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); 5114 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
5108 if (!trans) { 5115 if (!trans) {
5109 ret = -EIO; 5116 ret = -EIO;
5110 goto out; 5117 goto out_free;
5111 } 5118 }
5112 5119
5113 /* the extent lock is ordered inside the running 5120 /* the extent lock is ordered inside the running
@@ -5168,6 +5175,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5168 GFP_NOFS); 5175 GFP_NOFS);
5169 5176
5170 btrfs_end_transaction(trans, BTRFS_I(inode)->root); 5177 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5178out_free:
5179 btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
5171out: 5180out:
5172 mutex_unlock(&inode->i_mutex); 5181 mutex_unlock(&inode->i_mutex);
5173 return ret; 5182 return ret;
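The fallocate change above is the usual reserve-early, release-on-every-path bracket: btrfs_check_data_free_space() charges the whole requested range before any transaction starts, and the new out_free label guarantees btrfs_free_reserved_data_space() runs whether the preallocation loop succeeded or bailed out. A skeletal sketch of the shape (the helper names here are placeholders, not btrfs APIs):

#include <stdio.h>

static int reserve_space(long n)  { printf("reserve %ld\n", n); return 0; }
static void release_space(long n) { printf("release %ld\n", n); }
static int preallocate(void)      { return 0; /* may fail with -EIO etc. */ }

/* reserve up front; every exit path past the reservation releases it */
static int toy_fallocate(long bytes)
{
	int ret;

	ret = reserve_space(bytes);
	if (ret)
		goto out;            /* nothing reserved yet, plain exit */

	ret = preallocate();
	release_space(bytes);        /* out_free in the patch */
out:
	return ret;
}

int main(void)
{
	return toy_fallocate(4096);
}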
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index eff18f5b536..bd88f25889f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -27,7 +27,6 @@
27#include <linux/time.h> 27#include <linux/time.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/smp_lock.h>
31#include <linux/backing-dev.h> 30#include <linux/backing-dev.h>
32#include <linux/mount.h> 31#include <linux/mount.h>
33#include <linux/mpage.h> 32#include <linux/mpage.h>
@@ -1028,7 +1027,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1028 struct btrfs_file_extent_item); 1027 struct btrfs_file_extent_item);
1029 comp = btrfs_file_extent_compression(leaf, extent); 1028 comp = btrfs_file_extent_compression(leaf, extent);
1030 type = btrfs_file_extent_type(leaf, extent); 1029 type = btrfs_file_extent_type(leaf, extent);
1031 if (type == BTRFS_FILE_EXTENT_REG) { 1030 if (type == BTRFS_FILE_EXTENT_REG ||
1031 type == BTRFS_FILE_EXTENT_PREALLOC) {
1032 disko = btrfs_file_extent_disk_bytenr(leaf, 1032 disko = btrfs_file_extent_disk_bytenr(leaf,
1033 extent); 1033 extent);
1034 diskl = btrfs_file_extent_disk_num_bytes(leaf, 1034 diskl = btrfs_file_extent_disk_num_bytes(leaf,
@@ -1051,7 +1051,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1051 new_key.objectid = inode->i_ino; 1051 new_key.objectid = inode->i_ino;
1052 new_key.offset = key.offset + destoff - off; 1052 new_key.offset = key.offset + destoff - off;
1053 1053
1054 if (type == BTRFS_FILE_EXTENT_REG) { 1054 if (type == BTRFS_FILE_EXTENT_REG ||
1055 type == BTRFS_FILE_EXTENT_PREALLOC) {
1055 ret = btrfs_insert_empty_item(trans, root, path, 1056 ret = btrfs_insert_empty_item(trans, root, path,
1056 &new_key, size); 1057 &new_key, size);
1057 if (ret) 1058 if (ret)
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 6d6523da0a3..0d126be22b6 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
309 } 309 }
310 printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", 310 printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
311 (unsigned long long)btrfs_header_bytenr(c), 311 (unsigned long long)btrfs_header_bytenr(c),
312 btrfs_header_level(c), nr, 312 level, nr,
313 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); 313 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
314 for (i = 0; i < nr; i++) { 314 for (i = 0; i < nr; i++) {
315 btrfs_node_key_to_cpu(c, &key, i); 315 btrfs_node_key_to_cpu(c, &key, i);
@@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
326 btrfs_level_size(root, level - 1), 326 btrfs_level_size(root, level - 1),
327 btrfs_node_ptr_generation(c, i)); 327 btrfs_node_ptr_generation(c, i));
328 if (btrfs_is_leaf(next) && 328 if (btrfs_is_leaf(next) &&
329 btrfs_header_level(c) != 1) 329 level != 1)
330 BUG(); 330 BUG();
331 if (btrfs_header_level(next) != 331 if (btrfs_header_level(next) !=
332 btrfs_header_level(c) - 1) 332 level - 1)
333 BUG(); 333 BUG();
334 btrfs_print_tree(root, next); 334 btrfs_print_tree(root, next);
335 free_extent_buffer(next); 335 free_extent_buffer(next);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b23dc209ae1..c04f7f21260 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -670,6 +670,8 @@ again:
670 err = ret; 670 err = ret;
671 goto out; 671 goto out;
672 } 672 }
673 if (ret > 0 && path2->slots[level] > 0)
674 path2->slots[level]--;
673 675
674 eb = path2->nodes[level]; 676 eb = path2->nodes[level];
675 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != 677 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
@@ -1609,6 +1611,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1609 BUG_ON(level == 0); 1611 BUG_ON(level == 0);
1610 path->lowest_level = level; 1612 path->lowest_level = level;
1611 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); 1613 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1614 path->lowest_level = 0;
1612 if (ret < 0) { 1615 if (ret < 0) {
1613 btrfs_free_path(path); 1616 btrfs_free_path(path);
1614 return ret; 1617 return ret;
@@ -1788,7 +1791,7 @@ static void merge_func(struct btrfs_work *work)
1788 btrfs_end_transaction(trans, root); 1791 btrfs_end_transaction(trans, root);
1789 } 1792 }
1790 1793
1791 btrfs_drop_dead_root(reloc_root); 1794 btrfs_drop_snapshot(reloc_root, 0);
1792 1795
1793 if (atomic_dec_and_test(async->num_pending)) 1796 if (atomic_dec_and_test(async->num_pending))
1794 complete(async->done); 1797 complete(async->done);
@@ -2075,9 +2078,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2075 2078
2076 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2079 ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2077 BUG_ON(ret); 2080 BUG_ON(ret);
2078
2079 btrfs_tree_unlock(eb);
2080 free_extent_buffer(eb);
2081 } 2081 }
2082 if (!lowest) { 2082 if (!lowest) {
2083 btrfs_tree_unlock(upper->eb); 2083 btrfs_tree_unlock(upper->eb);
@@ -2553,8 +2553,13 @@ int relocate_inode_pages(struct inode *inode, u64 start, u64 len)
2553 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; 2553 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2554 2554
2555 /* make sure the dirty trick played by the caller works */ 2555 while (1) {
2556 ret = invalidate_inode_pages2_range(inode->i_mapping, 2556 while (1) {
2557 first_index, last_index); 2557 ret = invalidate_inode_pages2_range(inode->i_mapping,
2558 first_index, last_index);
2559 if (ret != -EBUSY)
2560 break;
2561 schedule_timeout(HZ/10);
2562 }
2558 if (ret) 2563 if (ret)
2559 goto out_unlock; 2564 goto out_unlock;
2560 2565
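The hunk above converts a single invalidate call into a retry loop that backs off while the mapping is busy. A minimal userspace sketch of the same shape, assuming a hypothetical try_invalidate() standing in for invalidate_inode_pages2_range() and nanosleep() standing in for schedule_timeout(HZ/10):

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for invalidate_inode_pages2_range(): reports -EBUSY twice,
 * then succeeds.  Purely illustrative. */
static int try_invalidate(void)
{
        static int busy = 2;
        return busy-- > 0 ? -EBUSY : 0;
}

int main(void)
{
        int ret;

        while (1) {
                ret = try_invalidate();
                if (ret != -EBUSY)
                        break;  /* success or a hard error: stop retrying */
                /* back off ~100ms, the analogue of schedule_timeout(HZ/10) */
                nanosleep(&(struct timespec){ 0, 100 * 1000 * 1000 }, NULL);
        }
        printf("finished with ret=%d\n", ret);
        return ret ? 1 : 0;
}

Only -EBUSY is retried; success and hard errors both fall through to the existing "if (ret)" error path.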
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9f179d4832d..6d6d06cb6df 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -26,7 +26,6 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/smp_lock.h>
30#include <linux/backing-dev.h> 29#include <linux/backing-dev.h>
31#include <linux/mount.h> 30#include <linux/mount.h>
32#include <linux/mpage.h> 31#include <linux/mpage.h>
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4e83457ea25..cdbb5022da5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -40,6 +40,12 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
40 } 40 }
41} 41}
42 42
43static noinline void switch_commit_root(struct btrfs_root *root)
44{
45 free_extent_buffer(root->commit_root);
46 root->commit_root = btrfs_root_node(root);
47}
48
43/* 49/*
44 * either allocate a new transaction or hop into the existing one 50 * either allocate a new transaction or hop into the existing one
45 */ 51 */
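switch_commit_root() factors out a pointer swap on a refcounted buffer: release the reference held on the old commit root, then pin the current node as the new one. A toy sketch under that reading, with struct buf standing in for struct extent_buffer and manual refcounts in place of the kernel's:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted buffer standing in for struct extent_buffer. */
struct buf { int refs; };

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }
static void buf_put(struct buf *b) { if (--b->refs == 0) free(b); }

struct root {
        struct buf *node;               /* current root node */
        struct buf *commit_root;        /* root as of the last commit */
};

/* The duplicated sequence the hunk factors out: drop the reference held
 * on the old commit root, then take one on the current node. */
static void switch_commit_root(struct root *r)
{
        buf_put(r->commit_root);
        r->commit_root = buf_get(r->node);
}

int main(void)
{
        struct buf *a = calloc(1, sizeof(*a));
        struct buf *b = calloc(1, sizeof(*b));
        struct root r;

        if (!a || !b)
                return 1;
        r.node = buf_get(b);
        r.commit_root = buf_get(a);

        switch_commit_root(&r);         /* frees a; commit_root now pins b */
        assert(r.commit_root == b && b->refs == 2);

        buf_put(r.node);
        buf_put(r.commit_root);         /* last reference: frees b */
        printf("ok\n");
        return 0;
}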
@@ -444,9 +450,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
444 450
445 btrfs_write_dirty_block_groups(trans, root); 451 btrfs_write_dirty_block_groups(trans, root);
446 452
447 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
448 BUG_ON(ret);
449
450 while (1) { 453 while (1) {
451 old_root_bytenr = btrfs_root_bytenr(&root->root_item); 454 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
452 if (old_root_bytenr == root->node->start) 455 if (old_root_bytenr == root->node->start)
@@ -457,13 +460,14 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
457 &root->root_key, 460 &root->root_key,
458 &root->root_item); 461 &root->root_item);
459 BUG_ON(ret); 462 BUG_ON(ret);
460 btrfs_write_dirty_block_groups(trans, root);
461 463
462 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 464 ret = btrfs_write_dirty_block_groups(trans, root);
463 BUG_ON(ret); 465 BUG_ON(ret);
464 } 466 }
465 free_extent_buffer(root->commit_root); 467
466 root->commit_root = btrfs_root_node(root); 468 if (root != root->fs_info->extent_root)
469 switch_commit_root(root);
470
467 return 0; 471 return 0;
468} 472}
469 473
@@ -495,10 +499,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
495 root = list_entry(next, struct btrfs_root, dirty_list); 499 root = list_entry(next, struct btrfs_root, dirty_list);
496 500
497 update_cowonly_root(trans, root); 501 update_cowonly_root(trans, root);
498
499 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
500 BUG_ON(ret);
501 } 502 }
503
504 down_write(&fs_info->extent_commit_sem);
505 switch_commit_root(fs_info->extent_root);
506 up_write(&fs_info->extent_commit_sem);
507
502 return 0; 508 return 0;
503} 509}
504 510
@@ -544,8 +550,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
544 btrfs_update_reloc_root(trans, root); 550 btrfs_update_reloc_root(trans, root);
545 551
546 if (root->commit_root != root->node) { 552 if (root->commit_root != root->node) {
547 free_extent_buffer(root->commit_root); 553 switch_commit_root(root);
548 root->commit_root = btrfs_root_node(root);
549 btrfs_set_root_node(&root->root_item, 554 btrfs_set_root_node(&root->root_item,
550 root->node); 555 root->node);
551 } 556 }
@@ -593,6 +598,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
593 return 0; 598 return 0;
594} 599}
595 600
601#if 0
596/* 602/*
597 * when dropping snapshots, we generate a ton of delayed refs, and it makes 603 * when dropping snapshots, we generate a ton of delayed refs, and it makes
598 * sense not to join the transaction while it is trying to flush the current 604 * sense not to join the transaction while it is trying to flush the current
@@ -681,6 +687,7 @@ int btrfs_drop_dead_root(struct btrfs_root *root)
681 btrfs_btree_balance_dirty(tree_root, nr); 687 btrfs_btree_balance_dirty(tree_root, nr);
682 return ret; 688 return ret;
683} 689}
690#endif
684 691
685/* 692/*
686 * new snapshots need to be created at a very specific time in the 693 * new snapshots need to be created at a very specific time in the
@@ -850,6 +857,16 @@ static void update_super_roots(struct btrfs_root *root)
850 super->root_level = root_item->level; 857 super->root_level = root_item->level;
851} 858}
852 859
860int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
861{
862 int ret = 0;
863 spin_lock(&info->new_trans_lock);
864 if (info->running_transaction)
865 ret = info->running_transaction->in_commit;
866 spin_unlock(&info->new_trans_lock);
867 return ret;
868}
869
853int btrfs_commit_transaction(struct btrfs_trans_handle *trans, 870int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
854 struct btrfs_root *root) 871 struct btrfs_root *root)
855{ 872{
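btrfs_transaction_in_commit() samples a flag on an object whose pointer can go NULL at any time, so both the pointer test and the read happen under new_trans_lock. A userspace sketch of the pattern with a pthread mutex in place of the spinlock; the names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct transaction { bool in_commit; };

struct fs_info {
        pthread_mutex_t new_trans_lock;
        struct transaction *running_transaction;        /* may be NULL */
};

/* Sample the flag under the lock that protects the pointer, so the
 * transaction cannot go away between the NULL test and the read. */
static bool transaction_in_commit(struct fs_info *info)
{
        bool ret = false;

        pthread_mutex_lock(&info->new_trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        pthread_mutex_unlock(&info->new_trans_lock);
        return ret;
}

int main(void)
{
        struct fs_info info = {
                .new_trans_lock = PTHREAD_MUTEX_INITIALIZER,
                .running_transaction = NULL,
        };
        return transaction_in_commit(&info) ? 1 : 0;
}

The return value is only a snapshot; it can be stale by the time the caller acts on it, which is acceptable for heuristic uses.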
@@ -941,9 +958,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
941 958
942 mutex_unlock(&root->fs_info->trans_mutex); 959 mutex_unlock(&root->fs_info->trans_mutex);
943 960
944 if (flush_on_commit || snap_pending) { 961 if (flush_on_commit) {
945 if (flush_on_commit) 962 btrfs_start_delalloc_inodes(root);
946 btrfs_start_delalloc_inodes(root); 963 ret = btrfs_wait_ordered_extents(root, 0);
964 BUG_ON(ret);
965 } else if (snap_pending) {
947 ret = btrfs_wait_ordered_extents(root, 1); 966 ret = btrfs_wait_ordered_extents(root, 1);
948 BUG_ON(ret); 967 BUG_ON(ret);
949 } 968 }
@@ -1007,15 +1026,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1007 1026
1008 btrfs_set_root_node(&root->fs_info->tree_root->root_item, 1027 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1009 root->fs_info->tree_root->node); 1028 root->fs_info->tree_root->node);
1010 free_extent_buffer(root->fs_info->tree_root->commit_root); 1029 switch_commit_root(root->fs_info->tree_root);
1011 root->fs_info->tree_root->commit_root =
1012 btrfs_root_node(root->fs_info->tree_root);
1013 1030
1014 btrfs_set_root_node(&root->fs_info->chunk_root->root_item, 1031 btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1015 root->fs_info->chunk_root->node); 1032 root->fs_info->chunk_root->node);
1016 free_extent_buffer(root->fs_info->chunk_root->commit_root); 1033 switch_commit_root(root->fs_info->chunk_root);
1017 root->fs_info->chunk_root->commit_root =
1018 btrfs_root_node(root->fs_info->chunk_root);
1019 1034
1020 update_super_roots(root); 1035 update_super_roots(root);
1021 1036
@@ -1055,6 +1070,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1055 cur_trans->commit_done = 1; 1070 cur_trans->commit_done = 1;
1056 1071
1057 root->fs_info->last_trans_committed = cur_trans->transid; 1072 root->fs_info->last_trans_committed = cur_trans->transid;
1073
1058 wake_up(&cur_trans->commit_wait); 1074 wake_up(&cur_trans->commit_wait);
1059 1075
1060 put_transaction(cur_trans); 1076 put_transaction(cur_trans);
@@ -1081,7 +1097,7 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1081 while (!list_empty(&list)) { 1097 while (!list_empty(&list)) {
1082 root = list_entry(list.next, struct btrfs_root, root_list); 1098 root = list_entry(list.next, struct btrfs_root, root_list);
1083 list_del_init(&root->root_list); 1099 list_del_init(&root->root_list);
1084 btrfs_drop_dead_root(root); 1100 btrfs_drop_snapshot(root, 0);
1085 } 1101 }
1086 return 0; 1102 return 0;
1087} 1103}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 961c3ee5a2e..663c6740491 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -107,4 +107,5 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root); 107 struct btrfs_root *root);
108int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 108int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
109 struct extent_io_tree *dirty_pages); 109 struct extent_io_tree *dirty_pages);
110int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
110#endif 111#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c13922206d1..d91b0de7c50 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
797 return -ENOENT; 797 return -ENOENT;
798 798
799 inode = read_one_inode(root, key->objectid); 799 inode = read_one_inode(root, key->objectid);
800 BUG_ON(!dir); 800 BUG_ON(!inode);
801 801
802 ref_ptr = btrfs_item_ptr_offset(eb, slot); 802 ref_ptr = btrfs_item_ptr_offset(eb, slot);
803 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 803 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3ab80e9cd76..5dbefd11b4a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -721,7 +721,8 @@ error:
721 */ 721 */
722static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, 722static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
723 struct btrfs_device *device, 723 struct btrfs_device *device,
724 u64 num_bytes, u64 *start) 724 u64 num_bytes, u64 *start,
725 u64 *max_avail)
725{ 726{
726 struct btrfs_key key; 727 struct btrfs_key key;
727 struct btrfs_root *root = device->dev_root; 728 struct btrfs_root *root = device->dev_root;
@@ -758,9 +759,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
758 ret = btrfs_search_slot(trans, root, &key, path, 0, 0); 759 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
759 if (ret < 0) 760 if (ret < 0)
760 goto error; 761 goto error;
761 ret = btrfs_previous_item(root, path, 0, key.type); 762 if (ret > 0) {
762 if (ret < 0) 763 ret = btrfs_previous_item(root, path, key.objectid, key.type);
763 goto error; 764 if (ret < 0)
765 goto error;
766 if (ret > 0)
767 start_found = 1;
768 }
764 l = path->nodes[0]; 769 l = path->nodes[0];
765 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 770 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
766 while (1) { 771 while (1) {
@@ -803,6 +808,10 @@ no_more_items:
803 if (last_byte < search_start) 808 if (last_byte < search_start)
804 last_byte = search_start; 809 last_byte = search_start;
805 hole_size = key.offset - last_byte; 810 hole_size = key.offset - last_byte;
811
812 if (hole_size > *max_avail)
813 *max_avail = hole_size;
814
806 if (key.offset > last_byte && 815 if (key.offset > last_byte &&
807 hole_size >= num_bytes) { 816 hole_size >= num_bytes) {
808 *start = last_byte; 817 *start = last_byte;
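find_free_dev_extent() now also reports the largest hole it saw via *max_avail, so the chunk allocator can shrink its request instead of retrying blindly. A self-contained sketch of that scan over a sorted extent list, with much simpler bookkeeping than the btree walk above:

#include <stddef.h>
#include <stdint.h>

struct extent { uint64_t start, len; };

/* Return the first hole >= need in a sorted extent list, and report the
 * largest hole seen in *max_avail so a failed caller knows how close it got. */
static int find_free_range(const struct extent *ext, size_t n,
                           uint64_t search_start, uint64_t dev_end,
                           uint64_t need, uint64_t *start, uint64_t *max_avail)
{
        uint64_t last_byte = search_start;

        *max_avail = 0;
        for (size_t i = 0; i < n; i++) {
                uint64_t hole = ext[i].start > last_byte ?
                                ext[i].start - last_byte : 0;
                if (hole > *max_avail)
                        *max_avail = hole;
                if (hole >= need) {
                        *start = last_byte;
                        return 0;
                }
                if (ext[i].start + ext[i].len > last_byte)
                        last_byte = ext[i].start + ext[i].len;
        }
        /* trailing hole after the last extent */
        if (dev_end > last_byte && dev_end - last_byte > *max_avail)
                *max_avail = dev_end - last_byte;
        if (dev_end > last_byte && dev_end - last_byte >= need) {
                *start = last_byte;
                return 0;
        }
        return -1;      /* nothing big enough; *max_avail says what was */
}

int main(void)
{
        const struct extent used[] = { { 0, 64 }, { 96, 32 } };
        uint64_t start, max_avail;

        /* hole [64,96) is 32 bytes; asking for 16 lands at offset 64 */
        if (find_free_range(used, 2, 0, 256, 16, &start, &max_avail))
                return 1;
        return start == 64 ? 0 : 1;
}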
@@ -1621,6 +1630,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1621 device->fs_devices->total_rw_bytes += diff; 1630 device->fs_devices->total_rw_bytes += diff;
1622 1631
1623 device->total_bytes = new_size; 1632 device->total_bytes = new_size;
1633 device->disk_total_bytes = new_size;
1624 btrfs_clear_space_info_full(device->dev_root->fs_info); 1634 btrfs_clear_space_info_full(device->dev_root->fs_info);
1625 1635
1626 return btrfs_update_device(trans, device); 1636 return btrfs_update_device(trans, device);
@@ -2007,7 +2017,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2007 goto done; 2017 goto done;
2008 if (ret) { 2018 if (ret) {
2009 ret = 0; 2019 ret = 0;
2010 goto done; 2020 break;
2011 } 2021 }
2012 2022
2013 l = path->nodes[0]; 2023 l = path->nodes[0];
@@ -2015,7 +2025,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2015 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 2025 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2016 2026
2017 if (key.objectid != device->devid) 2027 if (key.objectid != device->devid)
2018 goto done; 2028 break;
2019 2029
2020 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 2030 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2021 length = btrfs_dev_extent_length(l, dev_extent); 2031 length = btrfs_dev_extent_length(l, dev_extent);
@@ -2171,6 +2181,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2171 max_chunk_size); 2181 max_chunk_size);
2172 2182
2173again: 2183again:
2184 max_avail = 0;
2174 if (!map || map->num_stripes != num_stripes) { 2185 if (!map || map->num_stripes != num_stripes) {
2175 kfree(map); 2186 kfree(map);
2176 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 2187 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -2219,7 +2230,8 @@ again:
2219 2230
2220 if (device->in_fs_metadata && avail >= min_free) { 2231 if (device->in_fs_metadata && avail >= min_free) {
2221 ret = find_free_dev_extent(trans, device, 2232 ret = find_free_dev_extent(trans, device,
2222 min_free, &dev_offset); 2233 min_free, &dev_offset,
2234 &max_avail);
2223 if (ret == 0) { 2235 if (ret == 0) {
2224 list_move_tail(&device->dev_alloc_list, 2236 list_move_tail(&device->dev_alloc_list,
2225 &private_devs); 2237 &private_devs);
@@ -2795,26 +2807,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2795 } 2807 }
2796 } 2808 }
2797 2809
2798 for (i = 0; i > nr; i++) {
2799 struct btrfs_multi_bio *multi;
2800 struct btrfs_bio_stripe *stripe;
2801 int ret;
2802
2803 length = 1;
2804 ret = btrfs_map_block(map_tree, WRITE, buf[i],
2805 &length, &multi, 0);
2806 BUG_ON(ret);
2807
2808 stripe = multi->stripes;
2809 for (j = 0; j < multi->num_stripes; j++) {
2810 if (stripe->physical >= physical &&
2811 physical < stripe->physical + length)
2812 break;
2813 }
2814 BUG_ON(j >= multi->num_stripes);
2815 kfree(multi);
2816 }
2817
2818 *logical = buf; 2810 *logical = buf;
2819 *naddrs = nr; 2811 *naddrs = nr;
2820 *stripe_len = map->stripe_len; 2812 *stripe_len = map->stripe_len;
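The block removed above was dead code: the loop condition was written "i > nr" with i starting at zero, so the verification body could never execute. In miniature:

#include <stdio.h>

int main(void)
{
        int nr = 4;

        /* Dead loop, as in the removed hunk: 0 > 4 is false immediately. */
        for (int i = 0; i > nr; i++)
                printf("never printed\n");

        /* The intended bound test; prints four lines. */
        for (int i = 0; i < nr; i++)
                printf("i=%d\n", i);
        return 0;
}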
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index ecfbce836d3..3e2b90eaa23 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -208,7 +208,7 @@ int btrfs_zlib_compress_pages(struct address_space *mapping,
208 *total_in = 0; 208 *total_in = 0;
209 209
210 workspace = find_zlib_workspace(); 210 workspace = find_zlib_workspace();
211 if (!workspace) 211 if (IS_ERR(workspace))
212 return -1; 212 return -1;
213 213
214 if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { 214 if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
@@ -366,7 +366,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
366 char *kaddr; 366 char *kaddr;
367 367
368 workspace = find_zlib_workspace(); 368 workspace = find_zlib_workspace();
369 if (!workspace) 369 if (IS_ERR(workspace))
370 return -ENOMEM; 370 return -ENOMEM;
371 371
372 data_in = kmap(pages_in[page_in_index]); 372 data_in = kmap(pages_in[page_in_index]);
@@ -547,7 +547,7 @@ int btrfs_zlib_decompress(unsigned char *data_in,
547 return -ENOMEM; 547 return -ENOMEM;
548 548
549 workspace = find_zlib_workspace(); 549 workspace = find_zlib_workspace();
550 if (!workspace) 550 if (IS_ERR(workspace))
551 return -ENOMEM; 551 return -ENOMEM;
552 552
553 workspace->inf_strm.next_in = data_in; 553 workspace->inf_strm.next_in = data_in;
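find_zlib_workspace() evidently returns an ERR_PTR-encoded errno rather than NULL, so the old "!workspace" tests could never fire; the fix switches them to IS_ERR(). A userspace rendition of the kernel idiom, with the three helpers defined locally for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Encode a small negative errno in an otherwise invalid pointer value,
 * the way the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() trio does. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* An allocator in the style of find_zlib_workspace(): on failure it
 * returns ERR_PTR(-ENOMEM), never NULL, so callers must test IS_ERR(). */
static void *find_workspace(void)
{
        void *ws = malloc(64);
        return ws ? ws : ERR_PTR(-ENOMEM);
}

int main(void)
{
        void *ws = find_workspace();

        if (IS_ERR(ws)) {       /* a NULL check here would miss the error */
                fprintf(stderr, "no workspace: %ld\n", PTR_ERR(ws));
                return 1;
        }
        free(ws);
        return 0;
}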
diff --git a/fs/char_dev.c b/fs/char_dev.c
index b7c9d5187a7..a173551e19d 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -13,7 +13,6 @@
13#include <linux/major.h> 13#include <linux/major.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/smp_lock.h>
17#include <linux/seq_file.h> 16#include <linux/seq_file.h>
18 17
19#include <linux/kobject.h> 18#include <linux/kobject.h>
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index b4868983942..e85b1e4389e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,10 @@
1Version 1.60
2-------------
3Fix memory leak in reconnect. Fix oops in DFS mount error path.
4Set s_maxbytes to a smaller value (the max that the VFS can handle) so that
5sendfile will now work over cifs mounts again. Add noforcegid
6and noforceuid mount parameters.
7
1Version 1.59 8Version 1.59
2------------ 9------------
3Client uses server inode numbers (which are persistent) rather than 10Client uses server inode numbers (which are persistent) rather than
@@ -5,7 +12,11 @@ client generated ones by default (mount option "serverino" turned
5on by default if server supports it). Add forceuid and forcegid 12on by default if server supports it). Add forceuid and forcegid
6mount options (so that when negotiating unix extensions specifying 13mount options (so that when negotiating unix extensions specifying
7which uid mounted does not immediately force the server's reported 14which uid mounted does not immediately force the server's reported
8uids to be overridden). 15uids to be overridden). Add support for scope mount parm. Improve
16hard link detection to use the same inode for both. Do not set
17read-only dos attribute on directories (for chmod) since Windows
18explorer special cases this attribute bit for directories for
19a different purpose.
9 20
10Version 1.58 21Version 1.58
11------------ 22------------
diff --git a/fs/cifs/README b/fs/cifs/README
index ad92921dbde..79c1a93400b 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -262,11 +262,11 @@ A partial list of the supported mount options follows:
262 mount. 262 mount.
263 domain Set the SMB/CIFS workgroup name prepended to the 263 domain Set the SMB/CIFS workgroup name prepended to the
264 username during CIFS session establishment 264 username during CIFS session establishment
265 forceuid Set the default uid for inodes based on the uid 265 forceuid Set the default uid for inodes to the uid
266 passed in. For mounts to servers 266 passed in on mount. For mounts to servers
267 which do support the CIFS Unix extensions, such as a 267 which do support the CIFS Unix extensions, such as a
268 properly configured Samba server, the server provides 268 properly configured Samba server, the server provides
269 the uid, gid and mode so this parameter should not be 269 the uid, gid and mode so this parameter should not be
270 specified unless the server and clients uid and gid 270 specified unless the server and clients uid and gid
271 numbering differ. If the server and client are in the 271 numbering differ. If the server and client are in the
272 same domain (e.g. running winbind or nss_ldap) and 272 same domain (e.g. running winbind or nss_ldap) and
@@ -278,11 +278,7 @@ A partial list of the supported mount options follows:
278 of existing files will be the uid (gid) of the person 278 of existing files will be the uid (gid) of the person
279 who executed the mount (root, except when mount.cifs 279 who executed the mount (root, except when mount.cifs
280 is configured setuid for user mounts) unless the "uid=" 280 is configured setuid for user mounts) unless the "uid="
281 (gid) mount option is specified. For the uid (gid) of newly 281 (gid) mount option is specified. Also note that permission
282 created files and directories, ie files created since
283 the last mount of the server share, the expected uid
284 (gid) is cached as long as the inode remains in
285 memory on the client. Also note that permission
286 checks (authorization checks) on accesses to a file occur 282 checks (authorization checks) on accesses to a file occur
287 at the server, but there are cases in which an administrator 283 at the server, but there are cases in which an administrator
288 may want to restrict at the client as well. For those 284 may want to restrict at the client as well. For those
@@ -290,12 +286,15 @@ A partial list of the supported mount options follows:
290 (such as Windows), permissions can also be checked at the 286 (such as Windows), permissions can also be checked at the
291 client, and a crude form of client side permission checking 287 client, and a crude form of client side permission checking
292 can be enabled by specifying file_mode and dir_mode on 288 can be enabled by specifying file_mode and dir_mode on
293 the client. Note that the mount.cifs helper must be 289 the client. (default)
294 at version 1.10 or higher to support specifying the uid 290 forcegid (similar to above but for the groupid instead of uid) (default)
295 (or gid) in non-numeric form. 291 noforceuid Fill in file owner information (uid) by requesting it from
296 forcegid (similar to above but for the groupid instead of uid) 292 the server if possible. With this option, the value given in
293 the uid= option (on mount) will only be used if the server
294 can not support returning uids on inodes.
295 noforcegid (similar to above but for the group owner, gid, instead of uid)
297 uid Set the default uid for inodes, and indicate to the 296 uid Set the default uid for inodes, and indicate to the
298 cifs kernel driver which local user mounted . If the server 297 cifs kernel driver which local user mounted. If the server
299 supports the unix extensions the default uid is 298 supports the unix extensions the default uid is
300 not used to fill in the owner fields of inodes (files) 299 not used to fill in the owner fields of inodes (files)
301 unless the "forceuid" parameter is specified. 300 unless the "forceuid" parameter is specified.
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 1b09f167006..20692fbfdb2 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -49,6 +49,7 @@
49#define ASN1_OJI 6 /* Object Identifier */ 49#define ASN1_OJI 6 /* Object Identifier */
50#define ASN1_OJD 7 /* Object Description */ 50#define ASN1_OJD 7 /* Object Description */
51#define ASN1_EXT 8 /* External */ 51#define ASN1_EXT 8 /* External */
52#define ASN1_ENUM 10 /* Enumerated */
52#define ASN1_SEQ 16 /* Sequence */ 53#define ASN1_SEQ 16 /* Sequence */
53#define ASN1_SET 17 /* Set */ 54#define ASN1_SET 17 /* Set */
54#define ASN1_NUMSTR 18 /* Numerical String */ 55#define ASN1_NUMSTR 18 /* Numerical String */
@@ -78,10 +79,12 @@
78#define SPNEGO_OID_LEN 7 79#define SPNEGO_OID_LEN 7
79#define NTLMSSP_OID_LEN 10 80#define NTLMSSP_OID_LEN 10
80#define KRB5_OID_LEN 7 81#define KRB5_OID_LEN 7
82#define KRB5U2U_OID_LEN 8
81#define MSKRB5_OID_LEN 7 83#define MSKRB5_OID_LEN 7
82static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 }; 84static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
83static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 }; 85static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
84static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 }; 86static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 };
87static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };
85static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 }; 88static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 };
86 89
87/* 90/*
@@ -122,6 +125,28 @@ asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
122 return 1; 125 return 1;
123} 126}
124 127
128#if 0 /* will be needed later by spnego decoding/encoding of ntlmssp */
129static unsigned char
130asn1_enum_decode(struct asn1_ctx *ctx, __le32 *val)
131{
132 unsigned char ch;
133
134 if (ctx->pointer >= ctx->end) {
135 ctx->error = ASN1_ERR_DEC_EMPTY;
136 return 0;
137 }
138
139 ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */
140 if ((ch) == ASN1_ENUM) /* if ch value is ENUM, 0xa */
141 *val = *(++(ctx->pointer)); /* value has enum value */
142 else
143 return 0;
144
145 ctx->pointer++;
146 return 1;
147}
148#endif
149
125static unsigned char 150static unsigned char
126asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) 151asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
127{ 152{
@@ -476,10 +501,9 @@ decode_negTokenInit(unsigned char *security_blob, int length,
476 unsigned int cls, con, tag, oidlen, rc; 501 unsigned int cls, con, tag, oidlen, rc;
477 bool use_ntlmssp = false; 502 bool use_ntlmssp = false;
478 bool use_kerberos = false; 503 bool use_kerberos = false;
504 bool use_kerberosu2u = false;
479 bool use_mskerberos = false; 505 bool use_mskerberos = false;
480 506
481 *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default*/
482
483 /* cifs_dump_mem(" Received SecBlob ", security_blob, length); */ 507 /* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
484 508
485 asn1_open(&ctx, security_blob, length); 509 asn1_open(&ctx, security_blob, length);
@@ -515,6 +539,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
515 return 0; 539 return 0;
516 } 540 }
517 541
542 /* SPNEGO */
518 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 543 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
519 cFYI(1, ("Error decoding negTokenInit")); 544 cFYI(1, ("Error decoding negTokenInit"));
520 return 0; 545 return 0;
@@ -526,6 +551,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
526 return 0; 551 return 0;
527 } 552 }
528 553
554 /* negTokenInit */
529 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 555 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
530 cFYI(1, ("Error decoding negTokenInit")); 556 cFYI(1, ("Error decoding negTokenInit"));
531 return 0; 557 return 0;
@@ -537,6 +563,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
537 return 0; 563 return 0;
538 } 564 }
539 565
566 /* sequence */
540 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 567 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
541 cFYI(1, ("Error decoding 2nd part of negTokenInit")); 568 cFYI(1, ("Error decoding 2nd part of negTokenInit"));
542 return 0; 569 return 0;
@@ -548,6 +575,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
548 return 0; 575 return 0;
549 } 576 }
550 577
578 /* sequence of */
551 if (asn1_header_decode 579 if (asn1_header_decode
552 (&ctx, &sequence_end, &cls, &con, &tag) == 0) { 580 (&ctx, &sequence_end, &cls, &con, &tag) == 0) {
553 cFYI(1, ("Error decoding 2nd part of negTokenInit")); 581 cFYI(1, ("Error decoding 2nd part of negTokenInit"));
@@ -560,6 +588,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
560 return 0; 588 return 0;
561 } 589 }
562 590
591 /* list of security mechanisms */
563 while (!asn1_eoc_decode(&ctx, sequence_end)) { 592 while (!asn1_eoc_decode(&ctx, sequence_end)) {
564 rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag); 593 rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
565 if (!rc) { 594 if (!rc) {
@@ -576,11 +605,15 @@ decode_negTokenInit(unsigned char *security_blob, int length,
576 605
577 if (compare_oid(oid, oidlen, MSKRB5_OID, 606 if (compare_oid(oid, oidlen, MSKRB5_OID,
578 MSKRB5_OID_LEN) && 607 MSKRB5_OID_LEN) &&
579 !use_kerberos) 608 !use_mskerberos)
580 use_mskerberos = true; 609 use_mskerberos = true;
610 else if (compare_oid(oid, oidlen, KRB5U2U_OID,
611 KRB5U2U_OID_LEN) &&
612 !use_kerberosu2u)
613 use_kerberosu2u = true;
581 else if (compare_oid(oid, oidlen, KRB5_OID, 614 else if (compare_oid(oid, oidlen, KRB5_OID,
582 KRB5_OID_LEN) && 615 KRB5_OID_LEN) &&
583 !use_mskerberos) 616 !use_kerberos)
584 use_kerberos = true; 617 use_kerberos = true;
585 else if (compare_oid(oid, oidlen, NTLMSSP_OID, 618 else if (compare_oid(oid, oidlen, NTLMSSP_OID,
586 NTLMSSP_OID_LEN)) 619 NTLMSSP_OID_LEN))
@@ -593,7 +626,12 @@ decode_negTokenInit(unsigned char *security_blob, int length,
593 } 626 }
594 } 627 }
595 628
629 /* mechlistMIC */
596 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 630 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
631 /* Check if we have reached the end of the blob, but with
632 no mechListMic (e.g. NTLMSSP instead of KRB5) */
633 if (ctx.error == ASN1_ERR_DEC_EMPTY)
634 goto decode_negtoken_exit;
597 cFYI(1, ("Error decoding last part negTokenInit exit3")); 635 cFYI(1, ("Error decoding last part negTokenInit exit3"));
598 return 0; 636 return 0;
599 } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { 637 } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
@@ -602,6 +640,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
602 cls, con, tag, end, *end)); 640 cls, con, tag, end, *end));
603 return 0; 641 return 0;
604 } 642 }
643
644 /* sequence */
605 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 645 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
606 cFYI(1, ("Error decoding last part negTokenInit exit5")); 646 cFYI(1, ("Error decoding last part negTokenInit exit5"));
607 return 0; 647 return 0;
@@ -611,6 +651,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
611 cls, con, tag, end, *end)); 651 cls, con, tag, end, *end));
612 } 652 }
613 653
654 /* sequence of */
614 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 655 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
615 cFYI(1, ("Error decoding last part negTokenInit exit 7")); 656 cFYI(1, ("Error decoding last part negTokenInit exit 7"));
616 return 0; 657 return 0;
@@ -619,6 +660,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
619 cls, con, tag, end, *end)); 660 cls, con, tag, end, *end));
620 return 0; 661 return 0;
621 } 662 }
663
664 /* general string */
622 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 665 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
623 cFYI(1, ("Error decoding last part negTokenInit exit9")); 666 cFYI(1, ("Error decoding last part negTokenInit exit9"));
624 return 0; 667 return 0;
@@ -630,13 +673,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
630 } 673 }
631 cFYI(1, ("Need to call asn1_octets_decode() function for %s", 674 cFYI(1, ("Need to call asn1_octets_decode() function for %s",
632 ctx.pointer)); /* is this UTF-8 or ASCII? */ 675 ctx.pointer)); /* is this UTF-8 or ASCII? */
633 676decode_negtoken_exit:
634 if (use_kerberos) 677 if (use_kerberos)
635 *secType = Kerberos; 678 *secType = Kerberos;
636 else if (use_mskerberos) 679 else if (use_mskerberos)
637 *secType = MSKerberos; 680 *secType = MSKerberos;
638 else if (use_ntlmssp) 681 else if (use_ntlmssp)
639 *secType = NTLMSSP; 682 *secType = RawNTLMSSP;
640 683
641 return 1; 684 return 1;
642} 685}
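The disabled asn1_enum_decode() above aims at pulling one ENUMERATED octet (tag 0x0a, matching the new ASN1_ENUM define) out of a DER stream. A corrected minimal sketch of that decode, handling only the single-byte short form it needs; real blobs require full tag and length handling:

#include <stddef.h>
#include <stdint.h>

/* Decode a DER ENUMERATED with short-form length 1: tag 0x0a, length
 * octet of 1, then a single content octet. */
static int decode_enum_u8(const uint8_t *p, size_t len, uint8_t *val)
{
        if (len < 3)
                return -1;      /* need tag + length + one content octet */
        if (p[0] != 0x0a || p[1] != 1)
                return -1;      /* not ENUMERATED, or length != 1 */
        *val = p[2];
        return 0;
}

int main(void)
{
        const uint8_t der[] = { 0x0a, 0x01, 0x02 };     /* ENUMERATED 2 */
        uint8_t v;

        return (decode_enum_u8(der, sizeof(der), &v) == 0 && v == 2) ? 0 : 1;
}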
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 7f19fefd3d4..42cec2a7c0c 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -261,6 +261,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
261 atomic_set(&tcon->num_reads, 0); 261 atomic_set(&tcon->num_reads, 0);
262 atomic_set(&tcon->num_oplock_brks, 0); 262 atomic_set(&tcon->num_oplock_brks, 0);
263 atomic_set(&tcon->num_opens, 0); 263 atomic_set(&tcon->num_opens, 0);
264 atomic_set(&tcon->num_posixopens, 0);
265 atomic_set(&tcon->num_posixmkdirs, 0);
264 atomic_set(&tcon->num_closes, 0); 266 atomic_set(&tcon->num_closes, 0);
265 atomic_set(&tcon->num_deletes, 0); 267 atomic_set(&tcon->num_deletes, 0);
266 atomic_set(&tcon->num_mkdirs, 0); 268 atomic_set(&tcon->num_mkdirs, 0);
@@ -347,11 +349,15 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
347 atomic_read(&tcon->num_locks), 349 atomic_read(&tcon->num_locks),
348 atomic_read(&tcon->num_hardlinks), 350 atomic_read(&tcon->num_hardlinks),
349 atomic_read(&tcon->num_symlinks)); 351 atomic_read(&tcon->num_symlinks));
350 seq_printf(m, "\nOpens: %d Closes: %d" 352 seq_printf(m, "\nOpens: %d Closes: %d "
351 "Deletes: %d", 353 "Deletes: %d",
352 atomic_read(&tcon->num_opens), 354 atomic_read(&tcon->num_opens),
353 atomic_read(&tcon->num_closes), 355 atomic_read(&tcon->num_closes),
354 atomic_read(&tcon->num_deletes)); 356 atomic_read(&tcon->num_deletes));
357 seq_printf(m, "\nPosix Opens: %d "
358 "Posix Mkdirs: %d",
359 atomic_read(&tcon->num_posixopens),
360 atomic_read(&tcon->num_posixmkdirs));
355 seq_printf(m, "\nMkdirs: %d Rmdirs: %d", 361 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
356 atomic_read(&tcon->num_mkdirs), 362 atomic_read(&tcon->num_mkdirs),
357 atomic_read(&tcon->num_rmdirs)); 363 atomic_read(&tcon->num_rmdirs));
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 3bb11be8b6a..606912d8f2a 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -55,7 +55,7 @@ void cifs_dfs_release_automount_timer(void)
55 * i.e. strips from UNC trailing path that is not part of share 55 * i.e. strips from UNC trailing path that is not part of share
56 * name and fixes up a missing '\' at the beginning of a DFS node referral 56 * name and fixes up a missing '\' at the beginning of a DFS node referral
57 * if necessary. 57 * if necessary.
58 * Returns pointer to share name on success or NULL on error. 58 * Returns pointer to share name on success or ERR_PTR on error.
59 * Caller is responsible for freeing returned string. 59 * Caller is responsible for freeing returned string.
60 */ 60 */
61static char *cifs_get_share_name(const char *node_name) 61static char *cifs_get_share_name(const char *node_name)
@@ -68,7 +68,7 @@ static char *cifs_get_share_name(const char *node_name)
68 UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, 68 UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
69 GFP_KERNEL); 69 GFP_KERNEL);
70 if (!UNC) 70 if (!UNC)
71 return NULL; 71 return ERR_PTR(-ENOMEM);
72 72
73 /* get share name and server name */ 73 /* get share name and server name */
74 if (node_name[1] != '\\') { 74 if (node_name[1] != '\\') {
@@ -87,7 +87,7 @@ static char *cifs_get_share_name(const char *node_name)
87 cERROR(1, ("%s: no server name end in node name: %s", 87 cERROR(1, ("%s: no server name end in node name: %s",
88 __func__, node_name)); 88 __func__, node_name));
89 kfree(UNC); 89 kfree(UNC);
90 return NULL; 90 return ERR_PTR(-EINVAL);
91 } 91 }
92 92
93 /* find sharename end */ 93 /* find sharename end */
@@ -133,6 +133,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 134
135 *devname = cifs_get_share_name(ref->node_name); 135 *devname = cifs_get_share_name(ref->node_name);
136 if (IS_ERR(*devname)) {
137 rc = PTR_ERR(*devname);
138 *devname = NULL;
139 goto compose_mount_options_err;
140 }
141
136 rc = dns_resolve_server_name_to_ip(*devname, &srvIP); 142 rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
137 if (rc != 0) { 143 if (rc != 0) {
138 cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", 144 cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
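This is the caller-side half of the ERR_PTR change: cifs_compose_mount_options() folds the encoded errno into its return code and NULLs the out-parameter so the shared error path never kfree()s an ERR_PTR value. A sketch of that shape, repeating the helpers from the zlib.c note so it compiles on its own, with get_share_name() as an illustrative stand-in:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long e) { return (void *)e; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Illustrative stand-in for cifs_get_share_name(). */
static char *get_share_name(const char *node)
{
        char *s = strdup(node);
        return s ? s : ERR_PTR(-ENOMEM);
}

static int compose(const char *node, char **devname)
{
        *devname = get_share_name(node);
        if (IS_ERR(*devname)) {
                int rc = (int)PTR_ERR(*devname);

                *devname = NULL;        /* keep kfree()-style cleanup safe */
                return rc;
        }
        return 0;
}

int main(void)
{
        char *dev;
        int rc = compose("\\\\server\\share", &dev);

        if (!rc)
                free(dev);
        return rc ? 1 : 0;
}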
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 4a4581cb2b5..051caecf7d6 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -86,6 +86,9 @@ struct key_type cifs_spnego_key_type = {
86/* strlen of ";user=" */ 86/* strlen of ";user=" */
87#define USER_KEY_LEN 6 87#define USER_KEY_LEN 6
88 88
89/* strlen of ";pid=0x" */
90#define PID_KEY_LEN 7
91
89/* get a key struct with a SPNEGO security blob, suitable for session setup */ 92/* get a key struct with a SPNEGO security blob, suitable for session setup */
90struct key * 93struct key *
91cifs_get_spnego_key(struct cifsSesInfo *sesInfo) 94cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
@@ -103,7 +106,8 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
103 IP_KEY_LEN + INET6_ADDRSTRLEN + 106 IP_KEY_LEN + INET6_ADDRSTRLEN +
104 MAX_MECH_STR_LEN + 107 MAX_MECH_STR_LEN +
105 UID_KEY_LEN + (sizeof(uid_t) * 2) + 108 UID_KEY_LEN + (sizeof(uid_t) * 2) +
106 USER_KEY_LEN + strlen(sesInfo->userName) + 1; 109 USER_KEY_LEN + strlen(sesInfo->userName) +
110 PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
107 111
108 spnego_key = ERR_PTR(-ENOMEM); 112 spnego_key = ERR_PTR(-ENOMEM);
109 description = kzalloc(desc_len, GFP_KERNEL); 113 description = kzalloc(desc_len, GFP_KERNEL);
@@ -141,6 +145,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
141 dp = description + strlen(description); 145 dp = description + strlen(description);
142 sprintf(dp, ";user=%s", sesInfo->userName); 146 sprintf(dp, ";user=%s", sesInfo->userName);
143 147
148 dp = description + strlen(description);
149 sprintf(dp, ";pid=0x%x", current->pid);
150
144 cFYI(1, ("key description = %s", description)); 151 cFYI(1, ("key description = %s", description));
145 spnego_key = request_key(&cifs_spnego_key_type, description, ""); 152 spnego_key = request_key(&cifs_spnego_key_type, description, "");
146 153
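The spnego change stays safe because desc_len budgets the worst case for every ";key=value" fragment up front, and the new ";pid=0x" field contributes its literal length plus two hex digits per byte of pid_t. A userspace sketch of that sizing discipline, with illustrative values:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        const char *user = "someuser";
        /* worst case per fragment: literal prefix + widest value + NUL */
        size_t desc_len = strlen(";user=") + strlen(user) +
                          strlen(";pid=0x") + sizeof(pid_t) * 2 + 1;
        char *desc = calloc(1, desc_len);
        char *dp;

        if (!desc)
                return 1;
        sprintf(desc, ";user=%s", user);
        dp = desc + strlen(desc);       /* append, as the kernel code does */
        sprintf(dp, ";pid=0x%x", (unsigned int)getpid());
        puts(desc);
        free(desc);
        return 0;
}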
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 60e3c4253de..714a542cbaf 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,7 +44,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
44 int maxwords = maxbytes / 2; 44 int maxwords = maxbytes / 2;
45 char tmp[NLS_MAX_CHARSET_SIZE]; 45 char tmp[NLS_MAX_CHARSET_SIZE];
46 46
47 for (i = 0; from[i] && i < maxwords; i++) { 47 for (i = 0; i < maxwords && from[i]; i++) {
48 charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp, 48 charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
49 NLS_MAX_CHARSET_SIZE); 49 NLS_MAX_CHARSET_SIZE);
50 if (charlen > 0) 50 if (charlen > 0)
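The cifs_unicode.c change is an operand-order fix: "from[i] && i < maxwords" dereferences before it bounds-checks, reading past the end of a buffer that need not be NUL-terminated; swapping the operands lets && short-circuit before from[i] is evaluated. In miniature:

#include <stdio.h>

int main(void)
{
        short from[4] = { 'a', 'b', 'c', 'd' };         /* no terminator */
        int maxwords = 4;
        int i;

        /* correct order: bound first, then dereference */
        for (i = 0; i < maxwords && from[i]; i++)
                putchar((char)from[i]);
        putchar('\n');
        return 0;
}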
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 1403b5d86a7..6941c22398a 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -327,7 +327,7 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
327 327
328static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, 328static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
329 struct cifs_sid *pownersid, struct cifs_sid *pgrpsid, 329 struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
330 struct inode *inode) 330 struct cifs_fattr *fattr)
331{ 331{
332 int i; 332 int i;
333 int num_aces = 0; 333 int num_aces = 0;
@@ -340,7 +340,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
340 if (!pdacl) { 340 if (!pdacl) {
341 /* no DACL in the security descriptor, set 341 /* no DACL in the security descriptor, set
342 all the permissions for user/group/other */ 342 all the permissions for user/group/other */
343 inode->i_mode |= S_IRWXUGO; 343 fattr->cf_mode |= S_IRWXUGO;
344 return; 344 return;
345 } 345 }
346 346
@@ -357,7 +357,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
357 /* reset rwx permissions for user/group/other. 357 /* reset rwx permissions for user/group/other.
358 Also, if num_aces is 0 i.e. DACL has no ACEs, 358 Also, if num_aces is 0 i.e. DACL has no ACEs,
359 user/group/other have no permissions */ 359 user/group/other have no permissions */
360 inode->i_mode &= ~(S_IRWXUGO); 360 fattr->cf_mode &= ~(S_IRWXUGO);
361 361
362 acl_base = (char *)pdacl; 362 acl_base = (char *)pdacl;
363 acl_size = sizeof(struct cifs_acl); 363 acl_size = sizeof(struct cifs_acl);
@@ -379,17 +379,17 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
379 if (compare_sids(&(ppace[i]->sid), pownersid)) 379 if (compare_sids(&(ppace[i]->sid), pownersid))
380 access_flags_to_mode(ppace[i]->access_req, 380 access_flags_to_mode(ppace[i]->access_req,
381 ppace[i]->type, 381 ppace[i]->type,
382 &(inode->i_mode), 382 &fattr->cf_mode,
383 &user_mask); 383 &user_mask);
384 if (compare_sids(&(ppace[i]->sid), pgrpsid)) 384 if (compare_sids(&(ppace[i]->sid), pgrpsid))
385 access_flags_to_mode(ppace[i]->access_req, 385 access_flags_to_mode(ppace[i]->access_req,
386 ppace[i]->type, 386 ppace[i]->type,
387 &(inode->i_mode), 387 &fattr->cf_mode,
388 &group_mask); 388 &group_mask);
389 if (compare_sids(&(ppace[i]->sid), &sid_everyone)) 389 if (compare_sids(&(ppace[i]->sid), &sid_everyone))
390 access_flags_to_mode(ppace[i]->access_req, 390 access_flags_to_mode(ppace[i]->access_req,
391 ppace[i]->type, 391 ppace[i]->type,
392 &(inode->i_mode), 392 &fattr->cf_mode,
393 &other_mask); 393 &other_mask);
394 394
395/* memcpy((void *)(&(cifscred->aces[i])), 395/* memcpy((void *)(&(cifscred->aces[i])),
@@ -464,7 +464,7 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
464 464
465/* Convert CIFS ACL to POSIX form */ 465/* Convert CIFS ACL to POSIX form */
466static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, 466static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
467 struct inode *inode) 467 struct cifs_fattr *fattr)
468{ 468{
469 int rc; 469 int rc;
470 struct cifs_sid *owner_sid_ptr, *group_sid_ptr; 470 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
@@ -472,7 +472,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
472 char *end_of_acl = ((char *)pntsd) + acl_len; 472 char *end_of_acl = ((char *)pntsd) + acl_len;
473 __u32 dacloffset; 473 __u32 dacloffset;
474 474
475 if ((inode == NULL) || (pntsd == NULL)) 475 if (pntsd == NULL)
476 return -EIO; 476 return -EIO;
477 477
478 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + 478 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
@@ -497,7 +497,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
497 497
498 if (dacloffset) 498 if (dacloffset)
499 parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr, 499 parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
500 group_sid_ptr, inode); 500 group_sid_ptr, fattr);
501 else 501 else
502 cFYI(1, ("no ACL")); /* BB grant all or default perms? */ 502 cFYI(1, ("no ACL")); /* BB grant all or default perms? */
503 503
@@ -508,7 +508,6 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
508 memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr, 508 memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
509 sizeof(struct cifs_sid)); */ 509 sizeof(struct cifs_sid)); */
510 510
511
512 return 0; 511 return 0;
513} 512}
514 513
@@ -671,8 +670,9 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
671} 670}
672 671
673/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ 672
674void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode, 673void
675 const char *path, const __u16 *pfid) 674cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
675 struct inode *inode, const char *path, const __u16 *pfid)
676{ 676{
677 struct cifs_ntsd *pntsd = NULL; 677 struct cifs_ntsd *pntsd = NULL;
678 u32 acllen = 0; 678 u32 acllen = 0;
@@ -687,7 +687,7 @@ void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode,
687 687
688 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ 688 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
689 if (pntsd) 689 if (pntsd)
690 rc = parse_sec_desc(pntsd, acllen, inode); 690 rc = parse_sec_desc(pntsd, acllen, fattr);
691 if (rc) 691 if (rc)
692 cFYI(1, ("parse sec desc failed rc = %d", rc)); 692 cFYI(1, ("parse sec desc failed rc = %d", rc));
693 693
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0d92114195a..84b75253b05 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -308,7 +308,6 @@ cifs_alloc_inode(struct super_block *sb)
308 if (!cifs_inode) 308 if (!cifs_inode)
309 return NULL; 309 return NULL;
310 cifs_inode->cifsAttrs = 0x20; /* default */ 310 cifs_inode->cifsAttrs = 0x20; /* default */
311 atomic_set(&cifs_inode->inUse, 0);
312 cifs_inode->time = 0; 311 cifs_inode->time = 0;
313 cifs_inode->write_behind_rc = 0; 312 cifs_inode->write_behind_rc = 0;
314 /* Until the file is open and we have gotten oplock 313 /* Until the file is open and we have gotten oplock
@@ -333,6 +332,27 @@ cifs_destroy_inode(struct inode *inode)
333 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode)); 332 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
334} 333}
335 334
335static void
336cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
337{
338 seq_printf(s, ",addr=");
339
340 switch (server->addr.sockAddr.sin_family) {
341 case AF_INET:
342 seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr);
343 break;
344 case AF_INET6:
345 seq_printf(s, "%pI6",
346 &server->addr.sockAddr6.sin6_addr.s6_addr);
347 if (server->addr.sockAddr6.sin6_scope_id)
348 seq_printf(s, "%%%u",
349 server->addr.sockAddr6.sin6_scope_id);
350 break;
351 default:
352 seq_printf(s, "(unknown)");
353 }
354}
355
336/* 356/*
337 * cifs_show_options() is for displaying mount options in /proc/mounts. 357 * cifs_show_options() is for displaying mount options in /proc/mounts.
338 * Not all settable options are displayed but most of the important 358 * Not all settable options are displayed but most of the important
@@ -343,83 +363,68 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
343{ 363{
344 struct cifs_sb_info *cifs_sb; 364 struct cifs_sb_info *cifs_sb;
345 struct cifsTconInfo *tcon; 365 struct cifsTconInfo *tcon;
346 struct TCP_Server_Info *server;
347 366
348 cifs_sb = CIFS_SB(m->mnt_sb); 367 cifs_sb = CIFS_SB(m->mnt_sb);
368 tcon = cifs_sb->tcon;
349 369
350 if (cifs_sb) { 370 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
351 tcon = cifs_sb->tcon; 371 if (tcon->ses->userName)
352 if (tcon) { 372 seq_printf(s, ",username=%s", tcon->ses->userName);
353 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName); 373 if (tcon->ses->domainName)
354 if (tcon->ses) { 374 seq_printf(s, ",domain=%s", tcon->ses->domainName);
355 if (tcon->ses->userName) 375
356 seq_printf(s, ",username=%s", 376 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
357 tcon->ses->userName); 377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
358 if (tcon->ses->domainName) 378 seq_printf(s, ",forceuid");
359 seq_printf(s, ",domain=%s", 379 else
360 tcon->ses->domainName); 380 seq_printf(s, ",noforceuid");
361 server = tcon->ses->server; 381
362 if (server) { 382 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
363 seq_printf(s, ",addr="); 383 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
364 switch (server->addr.sockAddr6. 384 seq_printf(s, ",forcegid");
365 sin6_family) { 385 else
366 case AF_INET6: 386 seq_printf(s, ",noforcegid");
367 seq_printf(s, "%pI6", 387
368 &server->addr.sockAddr6.sin6_addr); 388 cifs_show_address(s, tcon->ses->server);
369 break; 389
370 case AF_INET: 390 if (!tcon->unix_ext)
371 seq_printf(s, "%pI4", 391 seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
372 &server->addr.sockAddr.sin_addr.s_addr);
373 break;
374 }
375 }
376 }
377 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
378 !(tcon->unix_ext))
379 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
380 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
381 !(tcon->unix_ext))
382 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
383 if (!tcon->unix_ext) {
384 seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
385 cifs_sb->mnt_file_mode, 392 cifs_sb->mnt_file_mode,
386 cifs_sb->mnt_dir_mode); 393 cifs_sb->mnt_dir_mode);
387 } 394 if (tcon->seal)
388 if (tcon->seal) 395 seq_printf(s, ",seal");
389 seq_printf(s, ",seal"); 396 if (tcon->nocase)
390 if (tcon->nocase) 397 seq_printf(s, ",nocase");
391 seq_printf(s, ",nocase"); 398 if (tcon->retry)
392 if (tcon->retry) 399 seq_printf(s, ",hard");
393 seq_printf(s, ",hard"); 400 if (cifs_sb->prepath)
394 } 401 seq_printf(s, ",prepath=%s", cifs_sb->prepath);
395 if (cifs_sb->prepath) 402 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
396 seq_printf(s, ",prepath=%s", cifs_sb->prepath); 403 seq_printf(s, ",posixpaths");
397 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) 404 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
398 seq_printf(s, ",posixpaths"); 405 seq_printf(s, ",setuids");
399 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) 406 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
400 seq_printf(s, ",setuids"); 407 seq_printf(s, ",serverino");
401 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) 408 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
402 seq_printf(s, ",serverino"); 409 seq_printf(s, ",directio");
403 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) 410 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
404 seq_printf(s, ",directio"); 411 seq_printf(s, ",nouser_xattr");
405 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 412 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
406 seq_printf(s, ",nouser_xattr"); 413 seq_printf(s, ",mapchars");
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) 414 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
408 seq_printf(s, ",mapchars"); 415 seq_printf(s, ",sfu");
409 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) 416 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
410 seq_printf(s, ",sfu"); 417 seq_printf(s, ",nobrl");
411 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 418 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
412 seq_printf(s, ",nobrl"); 419 seq_printf(s, ",cifsacl");
413 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) 420 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
414 seq_printf(s, ",cifsacl"); 421 seq_printf(s, ",dynperm");
415 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) 422 if (m->mnt_sb->s_flags & MS_POSIXACL)
416 seq_printf(s, ",dynperm"); 423 seq_printf(s, ",acl");
417 if (m->mnt_sb->s_flags & MS_POSIXACL) 424
418 seq_printf(s, ",acl"); 425 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
419 426 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
420 seq_printf(s, ",rsize=%d", cifs_sb->rsize); 427
421 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
422 }
423 return 0; 428 return 0;
424} 429}
425 430
@@ -535,9 +540,14 @@ static void cifs_umount_begin(struct super_block *sb)
535 if (tcon == NULL) 540 if (tcon == NULL)
536 return; 541 return;
537 542
538 lock_kernel();
539 read_lock(&cifs_tcp_ses_lock); 543 read_lock(&cifs_tcp_ses_lock);
540 if (tcon->tc_count == 1) 544 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
545 /* we have other mounts to the same share or we have
546 already tried to force umount this and woken up
547 all waiting network requests, nothing to do */
548 read_unlock(&cifs_tcp_ses_lock);
549 return;
550 } else if (tcon->tc_count == 1)
541 tcon->tidStatus = CifsExiting; 551 tcon->tidStatus = CifsExiting;
542 read_unlock(&cifs_tcp_ses_lock); 552 read_unlock(&cifs_tcp_ses_lock);
543 553
@@ -552,9 +562,7 @@ static void cifs_umount_begin(struct super_block *sb)
552 wake_up_all(&tcon->ses->server->response_q); 562 wake_up_all(&tcon->ses->server->response_q);
553 msleep(1); 563 msleep(1);
554 } 564 }
555/* BB FIXME - finish add checks for tidStatus BB */
556 565
557 unlock_kernel();
558 return; 566 return;
559} 567}
560 568
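The new cifs_show_address() helper branches on the stored address family and, for IPv6, appends the scope id after a literal '%'. A userspace sketch with inet_ntop() in place of the kernel's %pI4/%pI6 formats:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void show_address(FILE *s, const struct sockaddr_storage *ss)
{
        char buf[INET6_ADDRSTRLEN];

        fprintf(s, ",addr=");
        switch (ss->ss_family) {
        case AF_INET: {
                const struct sockaddr_in *in = (const void *)ss;
                fprintf(s, "%s", inet_ntop(AF_INET, &in->sin_addr,
                                           buf, sizeof(buf)));
                break;
        }
        case AF_INET6: {
                const struct sockaddr_in6 *in6 = (const void *)ss;
                fprintf(s, "%s", inet_ntop(AF_INET6, &in6->sin6_addr,
                                           buf, sizeof(buf)));
                if (in6->sin6_scope_id)         /* "%%%u" prints "%<id>" */
                        fprintf(s, "%%%u", in6->sin6_scope_id);
                break;
        }
        default:
                fprintf(s, "(unknown)");
        }
}

int main(void)
{
        struct sockaddr_storage ss;
        struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;

        memset(&ss, 0, sizeof(ss));
        in6->sin6_family = AF_INET6;
        inet_pton(AF_INET6, "fe80::1", &in6->sin6_addr);
        in6->sin6_scope_id = 2;
        show_address(stdout, &ss);      /* prints ",addr=fe80::1%2" */
        putchar('\n');
        return 0;
}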
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 9570a0e8023..6c170948300 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -24,6 +24,19 @@
24 24
25#define ROOT_I 2 25#define ROOT_I 2
26 26
27/*
28 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
29 * so that it will fit.
30 */
31static inline ino_t
32cifs_uniqueid_to_ino_t(u64 fileid)
33{
34 ino_t ino = (ino_t) fileid;
35 if (sizeof(ino_t) < sizeof(u64))
36 ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8;
37 return ino;
38}
39
27extern struct file_system_type cifs_fs_type; 40extern struct file_system_type cifs_fs_type;
28extern const struct address_space_operations cifs_addr_ops; 41extern const struct address_space_operations cifs_addr_ops;
29extern const struct address_space_operations cifs_addr_ops_smallbuf; 42extern const struct address_space_operations cifs_addr_ops_smallbuf;
@@ -100,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
100extern const struct export_operations cifs_export_ops; 113extern const struct export_operations cifs_export_ops;
101#endif /* EXPERIMENTAL */ 114#endif /* EXPERIMENTAL */
102 115
103#define CIFS_VERSION "1.59" 116#define CIFS_VERSION "1.60"
104#endif /* _CIFSFS_H */ 117#endif /* _CIFSFS_H */
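cifs_uniqueid_to_ino_t() folds the high half of the server's 64-bit unique id into the low half with XOR when ino_t is only 32 bits, rather than silently truncating. The same folding in isolation, with uint32_t standing in for a 32-bit ino_t:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Distinct ids that differ only in their top bits keep distinct inode
 * numbers, which plain truncation would not guarantee. */
static uint32_t uniqueid_to_ino32(uint64_t fileid)
{
        uint32_t ino = (uint32_t)fileid;

        ino ^= (uint32_t)(fileid >> 32);
        return ino;
}

int main(void)
{
        /* same low word, different high word: truncation would collide */
        assert((uint32_t)0x100000001ULL == (uint32_t)0x200000001ULL);
        assert(uniqueid_to_ino32(0x100000001ULL) !=
               uniqueid_to_ino32(0x200000001ULL));
        printf("ok\n");
        return 0;
}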
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a61ab772c6f..6084d6379c0 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -83,7 +83,7 @@ enum securityEnum {
83 NTLM, /* Legacy NTLM012 auth with NTLM hash */ 83 NTLM, /* Legacy NTLM012 auth with NTLM hash */
84 NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ 84 NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
85 RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ 85 RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
86 NTLMSSP, /* NTLMSSP via SPNEGO, NTLMv2 hash */ 86/* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */
87 Kerberos, /* Kerberos via SPNEGO */ 87 Kerberos, /* Kerberos via SPNEGO */
88 MSKerberos, /* MS Kerberos via SPNEGO */ 88 MSKerberos, /* MS Kerberos via SPNEGO */
89}; 89};
@@ -260,6 +260,8 @@ struct cifsTconInfo {
260 atomic_t num_closes; 260 atomic_t num_closes;
261 atomic_t num_deletes; 261 atomic_t num_deletes;
262 atomic_t num_mkdirs; 262 atomic_t num_mkdirs;
263 atomic_t num_posixopens;
264 atomic_t num_posixmkdirs;
263 atomic_t num_rmdirs; 265 atomic_t num_rmdirs;
264 atomic_t num_renames; 266 atomic_t num_renames;
265 atomic_t num_t2renames; 267 atomic_t num_t2renames;
@@ -364,13 +366,13 @@ struct cifsInodeInfo {
364 struct list_head openFileList; 366 struct list_head openFileList;
365 int write_behind_rc; 367 int write_behind_rc;
366 __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */ 368 __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
367 atomic_t inUse; /* num concurrent users (local openers cifs) of file*/
368 unsigned long time; /* jiffies of last update/check of inode */ 369 unsigned long time; /* jiffies of last update/check of inode */
369 bool clientCanCacheRead:1; /* read oplock */ 370 bool clientCanCacheRead:1; /* read oplock */
370 bool clientCanCacheAll:1; /* read and writebehind oplock */ 371 bool clientCanCacheAll:1; /* read and writebehind oplock */
371 bool oplockPending:1; 372 bool oplockPending:1;
372 bool delete_pending:1; /* DELETE_ON_CLOSE is set */ 373 bool delete_pending:1; /* DELETE_ON_CLOSE is set */
373 u64 server_eof; /* current file size on server */ 374 u64 server_eof; /* current file size on server */
375 u64 uniqueid; /* server inode number */
374 struct inode vfs_inode; 376 struct inode vfs_inode;
375}; 377};
376 378
@@ -472,6 +474,32 @@ struct dfs_info3_param {
472 char *node_name; 474 char *node_name;
473}; 475};
474 476
477/*
478 * common struct for holding inode info when searching for or updating an
479 * inode with new info
480 */
481
482#define CIFS_FATTR_DFS_REFERRAL 0x1
483#define CIFS_FATTR_DELETE_PENDING 0x2
484#define CIFS_FATTR_NEED_REVAL 0x4
485
486struct cifs_fattr {
487 u32 cf_flags;
488 u32 cf_cifsattrs;
489 u64 cf_uniqueid;
490 u64 cf_eof;
491 u64 cf_bytes;
492 uid_t cf_uid;
493 gid_t cf_gid;
494 umode_t cf_mode;
495 dev_t cf_rdev;
496 unsigned int cf_nlink;
497 unsigned int cf_dtype;
498 struct timespec cf_atime;
499 struct timespec cf_mtime;
500 struct timespec cf_ctime;
501};
502
475static inline void free_dfs_info_param(struct dfs_info3_param *param) 503static inline void free_dfs_info_param(struct dfs_info3_param *param)
476{ 504{
477 if (param) { 505 if (param) {
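struct cifs_fattr decouples parsing server responses from mutating inodes: parsers fill a plain attribute bundle, and one function applies it, which is the shape the cifsacl.c and cifsproto.h hunks convert to. A toy sketch of that split; the field and function names here are illustrative, not the kernel's:

#include <stdint.h>
#include <sys/types.h>
#include <time.h>

struct fattr {
        uint64_t uniqueid;
        uint64_t eof;
        mode_t   mode;
        uid_t    uid;
        gid_t    gid;
        struct timespec mtime;
};

struct toy_inode {
        uint64_t i_size;
        mode_t   i_mode;
        uid_t    i_uid;
        gid_t    i_gid;
        struct timespec i_mtime;
};

/* One chokepoint for "fattr -> inode", instead of every response parser
 * poking at the inode directly (the old acl_to_uid_mode() style). */
static void fattr_to_inode(struct toy_inode *inode, const struct fattr *f)
{
        inode->i_size  = f->eof;
        inode->i_mode  = f->mode;
        inode->i_uid   = f->uid;
        inode->i_gid   = f->gid;
        inode->i_mtime = f->mtime;
}

int main(void)
{
        struct fattr f = { .eof = 4096, .mode = 0644 };
        struct toy_inode ino = { 0 };

        fattr_to_inode(&ino, &f);
        return ino.i_size == 4096 ? 0 : 1;
}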
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index a785f69dbc9..2d07f890a84 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -2328,19 +2328,7 @@ struct file_attrib_tag {
2328typedef struct { 2328typedef struct {
2329 __le32 NextEntryOffset; 2329 __le32 NextEntryOffset;
2330 __u32 ResumeKey; /* as with FileIndex - no need to convert */ 2330 __u32 ResumeKey; /* as with FileIndex - no need to convert */
2331 __le64 EndOfFile; 2331 FILE_UNIX_BASIC_INFO basic;
2332 __le64 NumOfBytes;
2333 __le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
2334 __le64 LastAccessTime;
2335 __le64 LastModificationTime;
2336 __le64 Uid;
2337 __le64 Gid;
2338 __le32 Type;
2339 __le64 DevMajor;
2340 __le64 DevMinor;
2341 __le64 UniqueId;
2342 __le64 Permissions;
2343 __le64 Nlinks;
2344 char FileName[1]; 2332 char FileName[1];
2345} __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */ 2333} __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */
2346 2334
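The cifspdu.h hunk replaces a dozen duplicated fields with an embedded FILE_UNIX_BASIC_INFO, so one parser can serve both the search-entry and path-info levels. The refactor in miniature, with simplified layouts that are not the real wire format:

#include <stdint.h>
#include <stdio.h>

struct unix_basic_info {
        uint64_t end_of_file;
        uint64_t uid, gid;
        uint64_t permissions;
} __attribute__((packed));

struct unix_info {                      /* search result entry */
        uint32_t next_entry_offset;
        uint32_t resume_key;
        struct unix_basic_info basic;   /* one parser handles both levels */
        char     filename[1];
} __attribute__((packed));

int main(void)
{
        /* the entry is just a small header plus the embedded basic block */
        printf("entry size = %zu bytes\n", sizeof(struct unix_info));
        return 0;
}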
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f9452329bcc..da8fbf56599 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -74,7 +74,7 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr);
74extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); 74extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
75extern int decode_negTokenInit(unsigned char *security_blob, int length, 75extern int decode_negTokenInit(unsigned char *security_blob, int length,
76 enum securityEnum *secType); 76 enum securityEnum *secType);
77extern int cifs_inet_pton(const int, const char *source, void *dst); 77extern int cifs_convert_address(char *src, void *dst);
78extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); 78extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
79extern void header_assemble(struct smb_hdr *, char /* command */ , 79extern void header_assemble(struct smb_hdr *, char /* command */ ,
80 const struct cifsTconInfo *, int /* length of 80 const struct cifsTconInfo *, int /* length of
@@ -98,9 +98,13 @@ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
98extern int cifs_posix_open(char *full_path, struct inode **pinode, 98extern int cifs_posix_open(char *full_path, struct inode **pinode,
99 struct super_block *sb, int mode, int oflags, 99 struct super_block *sb, int mode, int oflags,
100 int *poplock, __u16 *pnetfid, int xid); 100 int *poplock, __u16 *pnetfid, int xid);
101extern void posix_fill_in_inode(struct inode *tmp_inode, 101extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
102 FILE_UNIX_BASIC_INFO *pData, int isNewInode); 102 FILE_UNIX_BASIC_INFO *info,
103extern struct inode *cifs_new_inode(struct super_block *sb, __u64 *inum); 103 struct cifs_sb_info *cifs_sb);
104extern void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
105extern struct inode *cifs_iget(struct super_block *sb,
106 struct cifs_fattr *fattr);
107
104extern int cifs_get_inode_info(struct inode **pinode, 108extern int cifs_get_inode_info(struct inode **pinode,
105 const unsigned char *search_path, 109 const unsigned char *search_path,
106 FILE_ALL_INFO *pfile_info, 110 FILE_ALL_INFO *pfile_info,
@@ -108,8 +112,9 @@ extern int cifs_get_inode_info(struct inode **pinode,
108extern int cifs_get_inode_info_unix(struct inode **pinode, 112extern int cifs_get_inode_info_unix(struct inode **pinode,
109 const unsigned char *search_path, 113 const unsigned char *search_path,
110 struct super_block *sb, int xid); 114 struct super_block *sb, int xid);
111extern void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode, 115extern void cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
112 const char *path, const __u16 *pfid); 116 struct cifs_fattr *fattr, struct inode *inode,
117 const char *path, const __u16 *pfid);
113extern int mode_to_acl(struct inode *inode, const char *path, __u64); 118extern int mode_to_acl(struct inode *inode, const char *path, __u64);
114 119
115extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, 120extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
@@ -215,7 +220,11 @@ struct cifs_unix_set_info_args {
215 dev_t device; 220 dev_t device;
216}; 221};
217 222
218extern int CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *pTcon, 223extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
224 const struct cifs_unix_set_info_args *args,
225 u16 fid, u32 pid_of_opener);
226
227extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon,
219 char *fileName, 228 char *fileName,
220 const struct cifs_unix_set_info_args *args, 229 const struct cifs_unix_set_info_args *args,
221 const struct nls_table *nls_codepage, 230 const struct nls_table *nls_codepage,
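
The split into CIFSSMBUnixSetPathInfo and CIFSSMBUnixSetFileInfo mirrors the usual path-based versus handle-based pairing: when the caller already holds an open fid, the handle-based call avoids resolving the name again on the server. A rough user-space analogy of that design choice, using the POSIX utimensat/futimens pair as a stand-in:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    int fd = open("example.txt", O_CREAT | O_WRONLY, 0644);

    if (fd < 0)
        return 1;
    futimens(fd, NULL);                          /* handle-based: no lookup */
    utimensat(AT_FDCWD, "example.txt", NULL, 0); /* path-based: resolves name */
    close(fd);
    return 0;
}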
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b84c61d5bca..1866bc2927d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -594,7 +594,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
594 else if (secFlags & CIFSSEC_MAY_KRB5) 594 else if (secFlags & CIFSSEC_MAY_KRB5)
595 server->secType = Kerberos; 595 server->secType = Kerberos;
596 else if (secFlags & CIFSSEC_MAY_NTLMSSP) 596 else if (secFlags & CIFSSEC_MAY_NTLMSSP)
597 server->secType = NTLMSSP; 597 server->secType = RawNTLMSSP;
598 else if (secFlags & CIFSSEC_MAY_LANMAN) 598 else if (secFlags & CIFSSEC_MAY_LANMAN)
599 server->secType = LANMAN; 599 server->secType = LANMAN;
600/* #ifdef CONFIG_CIFS_EXPERIMENTAL 600/* #ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -729,7 +729,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
729 * the tcon is no longer on the list, so no need to take lock before 729 * the tcon is no longer on the list, so no need to take lock before
730 * checking this. 730 * checking this.
731 */ 731 */
732 if (tcon->need_reconnect) 732 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
733 return 0; 733 return 0;
734 734
735 rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, 735 rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
@@ -1113,7 +1113,10 @@ PsxCreat:
1113psx_create_err: 1113psx_create_err:
1114 cifs_buf_release(pSMB); 1114 cifs_buf_release(pSMB);
1115 1115
1116 cifs_stats_inc(&tcon->num_mkdirs); 1116 if (posix_flags & SMB_O_DIRECTORY)
1117 cifs_stats_inc(&tcon->num_posixmkdirs);
1118 else
1119 cifs_stats_inc(&tcon->num_posixopens);
1117 1120
1118 if (rc == -EAGAIN) 1121 if (rc == -EAGAIN)
1119 goto PsxCreat; 1122 goto PsxCreat;
@@ -5074,10 +5077,114 @@ SetAttrLgcyRetry:
5074} 5077}
5075#endif /* temporarily unneeded SetAttr legacy function */ 5078#endif /* temporarily unneeded SetAttr legacy function */
5076 5079
5080static void
5081cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
5082 const struct cifs_unix_set_info_args *args)
5083{
5084 u64 mode = args->mode;
5085
5086 /*
5087 * Samba servers ignore a request to set the file size to zero due to
5088 * bugs in some older clients, but we should be precise - we use
5089 * SetFileSize to set the file size and do not want to truncate it to
5090 * zero accidentally, as happened on one Samba server beta when zero
5091 * was put here instead of -1
5092 */
5093 data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
5094 data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64);
5095 data_offset->LastStatusChange = cpu_to_le64(args->ctime);
5096 data_offset->LastAccessTime = cpu_to_le64(args->atime);
5097 data_offset->LastModificationTime = cpu_to_le64(args->mtime);
5098 data_offset->Uid = cpu_to_le64(args->uid);
5099 data_offset->Gid = cpu_to_le64(args->gid);
5100 /* better to leave device as zero when it is */
5101 data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
5102 data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
5103 data_offset->Permissions = cpu_to_le64(mode);
5104
5105 if (S_ISREG(mode))
5106 data_offset->Type = cpu_to_le32(UNIX_FILE);
5107 else if (S_ISDIR(mode))
5108 data_offset->Type = cpu_to_le32(UNIX_DIR);
5109 else if (S_ISLNK(mode))
5110 data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
5111 else if (S_ISCHR(mode))
5112 data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
5113 else if (S_ISBLK(mode))
5114 data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
5115 else if (S_ISFIFO(mode))
5116 data_offset->Type = cpu_to_le32(UNIX_FIFO);
5117 else if (S_ISSOCK(mode))
5118 data_offset->Type = cpu_to_le32(UNIX_SOCKET);
5119}
5120
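
The S_IS*() cascade at the end of cifs_fill_unix_set_info() maps local mode bits to the UNIX extension type codes. A user-space sketch of the same dispatch (the enum order is assumed to match the kernel's cifspdu.h; note the helper above deliberately leaves Type untouched for an unrecognized mode):

#include <stdio.h>
#include <sys/stat.h>

enum unix_type { UNIX_FILE, UNIX_DIR, UNIX_SYMLINK, UNIX_CHARDEV,
                 UNIX_BLOCKDEV, UNIX_FIFO, UNIX_SOCKET };

static int mode_to_unix_type(mode_t mode)
{
    if (S_ISREG(mode))  return UNIX_FILE;
    if (S_ISDIR(mode))  return UNIX_DIR;
    if (S_ISLNK(mode))  return UNIX_SYMLINK;
    if (S_ISCHR(mode))  return UNIX_CHARDEV;
    if (S_ISBLK(mode))  return UNIX_BLOCKDEV;
    if (S_ISFIFO(mode)) return UNIX_FIFO;
    if (S_ISSOCK(mode)) return UNIX_SOCKET;
    return -1;          /* unknown: leave the wire field alone */
}

int main(void)
{
    printf("dir -> %d, fifo -> %d\n",
           mode_to_unix_type(S_IFDIR | 0755),
           mode_to_unix_type(S_IFIFO | 0600));  /* 1, 5 */
    return 0;
}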
5077int 5121int
5078CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, 5122CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
5079 const struct cifs_unix_set_info_args *args, 5123 const struct cifs_unix_set_info_args *args,
5080 const struct nls_table *nls_codepage, int remap) 5124 u16 fid, u32 pid_of_opener)
5125{
5126 struct smb_com_transaction2_sfi_req *pSMB = NULL;
5127 FILE_UNIX_BASIC_INFO *data_offset;
5128 int rc = 0;
5129 u16 params, param_offset, offset, byte_count, count;
5130
5131 cFYI(1, ("Set Unix Info (via SetFileInfo)"));
5132 rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
5133
5134 if (rc)
5135 return rc;
5136
5137 pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
5138 pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
5139
5140 params = 6;
5141 pSMB->MaxSetupCount = 0;
5142 pSMB->Reserved = 0;
5143 pSMB->Flags = 0;
5144 pSMB->Timeout = 0;
5145 pSMB->Reserved2 = 0;
5146 param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
5147 offset = param_offset + params;
5148
5149 data_offset = (FILE_UNIX_BASIC_INFO *)
5150 ((char *)(&pSMB->hdr.Protocol) + offset);
5151 count = sizeof(FILE_UNIX_BASIC_INFO);
5152
5153 pSMB->MaxParameterCount = cpu_to_le16(2);
5154 /* BB find max SMB PDU from sess */
5155 pSMB->MaxDataCount = cpu_to_le16(1000);
5156 pSMB->SetupCount = 1;
5157 pSMB->Reserved3 = 0;
5158 pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
5159 byte_count = 3 /* pad */ + params + count;
5160 pSMB->DataCount = cpu_to_le16(count);
5161 pSMB->ParameterCount = cpu_to_le16(params);
5162 pSMB->TotalDataCount = pSMB->DataCount;
5163 pSMB->TotalParameterCount = pSMB->ParameterCount;
5164 pSMB->ParameterOffset = cpu_to_le16(param_offset);
5165 pSMB->DataOffset = cpu_to_le16(offset);
5166 pSMB->Fid = fid;
5167 pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
5168 pSMB->Reserved4 = 0;
5169 pSMB->hdr.smb_buf_length += byte_count;
5170 pSMB->ByteCount = cpu_to_le16(byte_count);
5171
5172 cifs_fill_unix_set_info(data_offset, args);
5173
5174 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
5175 if (rc)
5176 cFYI(1, ("Send error in Set Unix Info (SetFileInfo) = %d", rc));
5177
5178 /* Note: on an -EAGAIN error only the caller can retry handle-based
5179 calls, since the file handle passed in is no longer valid */
5180
5181 return rc;
5182}
5183
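
The Pid/PidHigh assignments in the handle-based variant split the 32-bit pid of the opener across the two 16-bit SMB header fields. A quick sketch of the split and the round trip (endianness omitted; the kernel additionally applies cpu_to_le16() to each half):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t pid_of_opener = 0x00031234;
    uint16_t pid_low  = (uint16_t)pid_of_opener;
    uint16_t pid_high = (uint16_t)(pid_of_opener >> 16);

    printf("low=0x%04x high=0x%04x round_trip_ok=%d\n", pid_low, pid_high,
           (((uint32_t)pid_high << 16) | pid_low) == pid_of_opener);
    return 0;
}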
5184int
5185CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
5186 const struct cifs_unix_set_info_args *args,
5187 const struct nls_table *nls_codepage, int remap)
5081{ 5188{
5082 TRANSACTION2_SPI_REQ *pSMB = NULL; 5189 TRANSACTION2_SPI_REQ *pSMB = NULL;
5083 TRANSACTION2_SPI_RSP *pSMBr = NULL; 5190 TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -5086,7 +5193,6 @@ CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
5086 int bytes_returned = 0; 5193 int bytes_returned = 0;
5087 FILE_UNIX_BASIC_INFO *data_offset; 5194 FILE_UNIX_BASIC_INFO *data_offset;
5088 __u16 params, param_offset, offset, count, byte_count; 5195 __u16 params, param_offset, offset, count, byte_count;
5089 __u64 mode = args->mode;
5090 5196
5091 cFYI(1, ("In SetUID/GID/Mode")); 5197 cFYI(1, ("In SetUID/GID/Mode"));
5092setPermsRetry: 5198setPermsRetry:
@@ -5137,38 +5243,8 @@ setPermsRetry:
5137 pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); 5243 pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
5138 pSMB->Reserved4 = 0; 5244 pSMB->Reserved4 = 0;
5139 pSMB->hdr.smb_buf_length += byte_count; 5245 pSMB->hdr.smb_buf_length += byte_count;
5140 /* Samba server ignores set of file size to zero due to bugs in some
5141 older clients, but we should be precise - we use SetFileSize to
5142 set file size and do not want to truncate file size to zero
5143 accidently as happened on one Samba server beta by putting
5144 zero instead of -1 here */
5145 data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
5146 data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64);
5147 data_offset->LastStatusChange = cpu_to_le64(args->ctime);
5148 data_offset->LastAccessTime = cpu_to_le64(args->atime);
5149 data_offset->LastModificationTime = cpu_to_le64(args->mtime);
5150 data_offset->Uid = cpu_to_le64(args->uid);
5151 data_offset->Gid = cpu_to_le64(args->gid);
5152 /* better to leave device as zero when it is */
5153 data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
5154 data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
5155 data_offset->Permissions = cpu_to_le64(mode);
5156
5157 if (S_ISREG(mode))
5158 data_offset->Type = cpu_to_le32(UNIX_FILE);
5159 else if (S_ISDIR(mode))
5160 data_offset->Type = cpu_to_le32(UNIX_DIR);
5161 else if (S_ISLNK(mode))
5162 data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
5163 else if (S_ISCHR(mode))
5164 data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
5165 else if (S_ISBLK(mode))
5166 data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
5167 else if (S_ISFIFO(mode))
5168 data_offset->Type = cpu_to_le32(UNIX_FIFO);
5169 else if (S_ISSOCK(mode))
5170 data_offset->Type = cpu_to_le32(UNIX_SOCKET);
5171 5246
5247 cifs_fill_unix_set_info(data_offset, args);
5172 5248
5173 pSMB->ByteCount = cpu_to_le16(byte_count); 5249 pSMB->ByteCount = cpu_to_le16(byte_count);
5174 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 5250 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 97f4311b9a8..1f3345d7fa7 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -70,7 +70,6 @@ struct smb_vol {
70 mode_t file_mode; 70 mode_t file_mode;
71 mode_t dir_mode; 71 mode_t dir_mode;
72 unsigned secFlg; 72 unsigned secFlg;
73 bool rw:1;
74 bool retry:1; 73 bool retry:1;
75 bool intr:1; 74 bool intr:1;
76 bool setuids:1; 75 bool setuids:1;
@@ -804,6 +803,10 @@ cifs_parse_mount_options(char *options, const char *devname,
804 char *data; 803 char *data;
805 unsigned int temp_len, i, j; 804 unsigned int temp_len, i, j;
806 char separator[2]; 805 char separator[2];
806 short int override_uid = -1;
807 short int override_gid = -1;
808 bool uid_specified = false;
809 bool gid_specified = false;
807 810
808 separator[0] = ','; 811 separator[0] = ',';
809 separator[1] = 0; 812 separator[1] = 0;
@@ -832,7 +835,6 @@ cifs_parse_mount_options(char *options, const char *devname,
832 vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR; 835 vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
833 836
834 /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ 837 /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
835 vol->rw = true;
836 /* default is always to request posix paths. */ 838 /* default is always to request posix paths. */
837 vol->posix_paths = 1; 839 vol->posix_paths = 1;
838 /* default to using server inode numbers where available */ 840 /* default to using server inode numbers where available */
@@ -1095,18 +1097,20 @@ cifs_parse_mount_options(char *options, const char *devname,
1095 "too long.\n"); 1097 "too long.\n");
1096 return 1; 1098 return 1;
1097 } 1099 }
1098 } else if (strnicmp(data, "uid", 3) == 0) { 1100 } else if (!strnicmp(data, "uid", 3) && value && *value) {
1099 if (value && *value) 1101 vol->linux_uid = simple_strtoul(value, &value, 0);
1100 vol->linux_uid = 1102 uid_specified = true;
1101 simple_strtoul(value, &value, 0); 1103 } else if (!strnicmp(data, "forceuid", 8)) {
1102 } else if (strnicmp(data, "forceuid", 8) == 0) { 1104 override_uid = 1;
1103 vol->override_uid = 1; 1105 } else if (!strnicmp(data, "noforceuid", 10)) {
1104 } else if (strnicmp(data, "gid", 3) == 0) { 1106 override_uid = 0;
1105 if (value && *value) 1107 } else if (!strnicmp(data, "gid", 3) && value && *value) {
1106 vol->linux_gid = 1108 vol->linux_gid = simple_strtoul(value, &value, 0);
1107 simple_strtoul(value, &value, 0); 1109 gid_specified = true;
1108 } else if (strnicmp(data, "forcegid", 8) == 0) { 1110 } else if (!strnicmp(data, "forcegid", 8)) {
1109 vol->override_gid = 1; 1111 override_gid = 1;
1112 } else if (!strnicmp(data, "noforcegid", 10)) {
1113 override_gid = 0;
1110 } else if (strnicmp(data, "file_mode", 4) == 0) { 1114 } else if (strnicmp(data, "file_mode", 4) == 0) {
1111 if (value && *value) { 1115 if (value && *value) {
1112 vol->file_mode = 1116 vol->file_mode =
@@ -1199,7 +1203,9 @@ cifs_parse_mount_options(char *options, const char *devname,
1199 } else if (strnicmp(data, "guest", 5) == 0) { 1203 } else if (strnicmp(data, "guest", 5) == 0) {
1200 /* ignore */ 1204 /* ignore */
1201 } else if (strnicmp(data, "rw", 2) == 0) { 1205 } else if (strnicmp(data, "rw", 2) == 0) {
1202 vol->rw = true; 1206 /* ignore */
1207 } else if (strnicmp(data, "ro", 2) == 0) {
1208 /* ignore */
1203 } else if (strnicmp(data, "noblocksend", 11) == 0) { 1209 } else if (strnicmp(data, "noblocksend", 11) == 0) {
1204 vol->noblocksnd = 1; 1210 vol->noblocksnd = 1;
1205 } else if (strnicmp(data, "noautotune", 10) == 0) { 1211 } else if (strnicmp(data, "noautotune", 10) == 0) {
@@ -1218,8 +1224,6 @@ cifs_parse_mount_options(char *options, const char *devname,
1218 parse these options again and set anything and it 1224 parse these options again and set anything and it
1219 is ok to just ignore them */ 1225 is ok to just ignore them */
1220 continue; 1226 continue;
1221 } else if (strnicmp(data, "ro", 2) == 0) {
1222 vol->rw = false;
1223 } else if (strnicmp(data, "hard", 4) == 0) { 1227 } else if (strnicmp(data, "hard", 4) == 0) {
1224 vol->retry = 1; 1228 vol->retry = 1;
1225 } else if (strnicmp(data, "soft", 4) == 0) { 1229 } else if (strnicmp(data, "soft", 4) == 0) {
@@ -1357,6 +1361,18 @@ cifs_parse_mount_options(char *options, const char *devname,
1357 if (vol->UNCip == NULL) 1361 if (vol->UNCip == NULL)
1358 vol->UNCip = &vol->UNC[2]; 1362 vol->UNCip = &vol->UNC[2];
1359 1363
1364 if (uid_specified)
1365 vol->override_uid = override_uid;
1366 else if (override_uid == 1)
1367 printk(KERN_NOTICE "CIFS: ignoring forceuid mount option "
1368 "specified with no uid= option.\n");
1369
1370 if (gid_specified)
1371 vol->override_gid = override_gid;
1372 else if (override_gid == 1)
1373 printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
1374 "specified with no gid= option.\n");
1375
1360 return 0; 1376 return 0;
1361} 1377}
1362 1378
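
The reworked option parsing defers the forceuid/noforceuid decision until all options have been seen, so the result no longer depends on whether uid= appears before or after the override flag. A small user-space sketch of that two-pass shape (toy parser, not the kernel tokenizer):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>

int main(void)
{
    const char *opts[] = { "forceuid", "uid=1000" };  /* order no longer matters */
    bool uid_specified = false;
    int override_uid = -1;  /* -1 = not mentioned on the command line */
    unsigned long uid = 0;
    size_t i;

    for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
        if (!strncmp(opts[i], "uid=", 4)) {
            uid = strtoul(opts[i] + 4, NULL, 0);
            uid_specified = true;
        } else if (!strcmp(opts[i], "forceuid")) {
            override_uid = 1;
        } else if (!strcmp(opts[i], "noforceuid")) {
            override_uid = 0;
        }
    }

    if (uid_specified)
        printf("uid=%lu override=%d\n", uid, override_uid);
    else if (override_uid == 1)
        printf("ignoring forceuid: no uid= given\n");
    return 0;
}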
@@ -1386,8 +1402,10 @@ cifs_find_tcp_session(struct sockaddr_storage *addr)
1386 server->addr.sockAddr.sin_addr.s_addr)) 1402 server->addr.sockAddr.sin_addr.s_addr))
1387 continue; 1403 continue;
1388 else if (addr->ss_family == AF_INET6 && 1404 else if (addr->ss_family == AF_INET6 &&
1389 !ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr, 1405 (!ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
1390 &addr6->sin6_addr)) 1406 &addr6->sin6_addr) ||
1407 server->addr.sockAddr6.sin6_scope_id !=
1408 addr6->sin6_scope_id))
1391 continue; 1409 continue;
1392 1410
1393 ++server->srv_count; 1411 ++server->srv_count;
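
The added sin6_scope_id comparison matters because two IPv6 link-local peers can share the same address bytes while living on different interfaces; without the scope check an existing session could be wrongly reused. A user-space sketch of the stricter match:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

static int same_endpoint(const struct sockaddr_in6 *a,
                         const struct sockaddr_in6 *b)
{
    return !memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr)) &&
           a->sin6_scope_id == b->sin6_scope_id;
}

int main(void)
{
    struct sockaddr_in6 a = { .sin6_family = AF_INET6, .sin6_scope_id = 1 };
    struct sockaddr_in6 b = a;

    b.sin6_scope_id = 2;  /* same address bytes, different interface */
    printf("match=%d\n", same_endpoint(&a, &b));  /* 0 */
    return 0;
}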
@@ -1433,28 +1451,15 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1433 1451
1434 memset(&addr, 0, sizeof(struct sockaddr_storage)); 1452 memset(&addr, 0, sizeof(struct sockaddr_storage));
1435 1453
1436 if (volume_info->UNCip && volume_info->UNC) { 1454 cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip));
1437 rc = cifs_inet_pton(AF_INET, volume_info->UNCip,
1438 &sin_server->sin_addr.s_addr);
1439
1440 if (rc <= 0) {
1441 /* not ipv4 address, try ipv6 */
1442 rc = cifs_inet_pton(AF_INET6, volume_info->UNCip,
1443 &sin_server6->sin6_addr.in6_u);
1444 if (rc > 0)
1445 addr.ss_family = AF_INET6;
1446 } else {
1447 addr.ss_family = AF_INET;
1448 }
1449 1455
1450 if (rc <= 0) { 1456 if (volume_info->UNCip && volume_info->UNC) {
1457 rc = cifs_convert_address(volume_info->UNCip, &addr);
1458 if (!rc) {
1451 /* we failed translating address */ 1459 /* we failed translating address */
1452 rc = -EINVAL; 1460 rc = -EINVAL;
1453 goto out_err; 1461 goto out_err;
1454 } 1462 }
1455
1456 cFYI(1, ("UNC: %s ip: %s", volume_info->UNC,
1457 volume_info->UNCip));
1458 } else if (volume_info->UNCip) { 1463 } else if (volume_info->UNCip) {
1459 /* BB using ip addr as tcp_ses name to connect to the 1464 /* BB using ip addr as tcp_ses name to connect to the
1460 DFS root below */ 1465 DFS root below */
@@ -1513,14 +1518,14 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1513 cFYI(1, ("attempting ipv6 connect")); 1518 cFYI(1, ("attempting ipv6 connect"));
1514 /* BB should we allow ipv6 on port 139? */ 1519 /* BB should we allow ipv6 on port 139? */
1515 /* no other OS observed in the wild doing 139 with v6 */ 1520 /* no other OS observed in the wild doing 139 with v6 */
1521 sin_server6->sin6_port = htons(volume_info->port);
1516 memcpy(&tcp_ses->addr.sockAddr6, sin_server6, 1522 memcpy(&tcp_ses->addr.sockAddr6, sin_server6,
1517 sizeof(struct sockaddr_in6)); 1523 sizeof(struct sockaddr_in6));
1518 sin_server6->sin6_port = htons(volume_info->port);
1519 rc = ipv6_connect(tcp_ses); 1524 rc = ipv6_connect(tcp_ses);
1520 } else { 1525 } else {
1526 sin_server->sin_port = htons(volume_info->port);
1521 memcpy(&tcp_ses->addr.sockAddr, sin_server, 1527 memcpy(&tcp_ses->addr.sockAddr, sin_server,
1522 sizeof(struct sockaddr_in)); 1528 sizeof(struct sockaddr_in));
1523 sin_server->sin_port = htons(volume_info->port);
1524 rc = ipv4_connect(tcp_ses); 1529 rc = ipv4_connect(tcp_ses);
1525 } 1530 }
1526 if (rc < 0) { 1531 if (rc < 0) {
@@ -2465,10 +2470,10 @@ try_mount_again:
2465 tcon->local_lease = volume_info->local_lease; 2470 tcon->local_lease = volume_info->local_lease;
2466 } 2471 }
2467 if (pSesInfo) { 2472 if (pSesInfo) {
2468 if (pSesInfo->capabilities & CAP_LARGE_FILES) { 2473 if (pSesInfo->capabilities & CAP_LARGE_FILES)
2469 sb->s_maxbytes = (u64) 1 << 63; 2474 sb->s_maxbytes = MAX_LFS_FILESIZE;
2470 } else 2475 else
2471 sb->s_maxbytes = (u64) 1 << 31; /* 2 GB */ 2476 sb->s_maxbytes = MAX_NON_LFS;
2472 } 2477 }
2473 2478
2474 /* BB FIXME fix time_gran to be larger for LANMAN sessions */ 2479 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
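
Replacing the open-coded shifts with MAX_LFS_FILESIZE/MAX_NON_LFS is more than cosmetic: (u64)1 << 63 does not fit in a signed 64-bit file offset at all. A quick check (assuming, as on a 64-bit build of this era, that MAX_LFS_FILESIZE is the signed 64-bit maximum):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t old_bound = (uint64_t)1 << 63;  /* what the old code stored */
    int64_t  lfs_max   = INT64_MAX;          /* MAX_LFS_FILESIZE-style cap */

    printf("old=%llu max=%lld fits=%d\n",
           (unsigned long long)old_bound, (long long)lfs_max,
           old_bound <= (uint64_t)lfs_max);  /* fits=0 */
    return 0;
}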
@@ -2557,11 +2562,20 @@ remote_path_check:
2557 2562
2558 if (mount_data != mount_data_global) 2563 if (mount_data != mount_data_global)
2559 kfree(mount_data); 2564 kfree(mount_data);
2565
2560 mount_data = cifs_compose_mount_options( 2566 mount_data = cifs_compose_mount_options(
2561 cifs_sb->mountdata, full_path + 1, 2567 cifs_sb->mountdata, full_path + 1,
2562 referrals, &fake_devname); 2568 referrals, &fake_devname);
2563 kfree(fake_devname); 2569
2564 free_dfs_info_array(referrals, num_referrals); 2570 free_dfs_info_array(referrals, num_referrals);
2571 kfree(fake_devname);
2572 kfree(full_path);
2573
2574 if (IS_ERR(mount_data)) {
2575 rc = PTR_ERR(mount_data);
2576 mount_data = NULL;
2577 goto mount_fail_check;
2578 }
2565 2579
2566 if (tcon) 2580 if (tcon)
2567 cifs_put_tcon(tcon); 2581 cifs_put_tcon(tcon);
@@ -2569,8 +2583,6 @@ remote_path_check:
2569 cifs_put_smb_ses(pSesInfo); 2583 cifs_put_smb_ses(pSesInfo);
2570 2584
2571 cleanup_volume_info(&volume_info); 2585 cleanup_volume_info(&volume_info);
2572 FreeXid(xid);
2573 kfree(full_path);
2574 referral_walks_count++; 2586 referral_walks_count++;
2575 goto try_mount_again; 2587 goto try_mount_again;
2576 } 2588 }
@@ -2739,6 +2751,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
2739 strncpy(tcon->treeName, tree, MAX_TREE_SIZE); 2751 strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
2740 2752
2741 /* mostly informational -- no need to fail on error here */ 2753 /* mostly informational -- no need to fail on error here */
2754 kfree(tcon->nativeFileSystem);
2742 tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr, 2755 tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr,
2743 bytes_left, is_unicode, 2756 bytes_left, is_unicode,
2744 nls_codepage); 2757 nls_codepage);
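
The kfree() added before the strndup call in CIFSTCon() plugs a leak on tree reconnect: the string is re-duplicated each time, so the previous allocation must be released before the pointer is overwritten. A user-space sketch of the pattern (strdup/free standing in for the kernel allocators):

#include <stdlib.h>
#include <string.h>

static char *native_fs;  /* stands in for tcon->nativeFileSystem */

static void set_native_fs(const char *name)
{
    free(native_fs);      /* no-op on first call (NULL), frees old copy later */
    native_fs = strdup(name);
}

int main(void)
{
    set_native_fs("NTFS");
    set_native_fs("NTFS"); /* reconnect: old copy freed, not leaked */
    free(native_fs);
    return 0;
}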
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3758965d73d..4326ffd90fa 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -188,6 +188,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
188 FILE_UNIX_BASIC_INFO *presp_data; 188 FILE_UNIX_BASIC_INFO *presp_data;
189 __u32 posix_flags = 0; 189 __u32 posix_flags = 0;
190 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 190 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
191 struct cifs_fattr fattr;
191 192
192 cFYI(1, ("posix open %s", full_path)); 193 cFYI(1, ("posix open %s", full_path));
193 194
@@ -236,22 +237,21 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
236 if (presp_data->Type == cpu_to_le32(-1)) 237 if (presp_data->Type == cpu_to_le32(-1))
237 goto posix_open_ret; /* open ok, caller does qpathinfo */ 238 goto posix_open_ret; /* open ok, caller does qpathinfo */
238 239
239 /* get new inode and set it up */
240 if (!pinode) 240 if (!pinode)
241 goto posix_open_ret; /* caller does not need info */ 241 goto posix_open_ret; /* caller does not need info */
242 242
243 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
244
245 /* get new inode and set it up */
243 if (*pinode == NULL) { 246 if (*pinode == NULL) {
244 __u64 unique_id = le64_to_cpu(presp_data->UniqueId); 247 *pinode = cifs_iget(sb, &fattr);
245 *pinode = cifs_new_inode(sb, &unique_id); 248 if (!*pinode) {
249 rc = -ENOMEM;
250 goto posix_open_ret;
251 }
252 } else {
253 cifs_fattr_to_inode(*pinode, &fattr);
246 } 254 }
247 /* else an inode was passed in. Update its info, don't create one */
248
249 /* We do not need to close the file if new_inode fails since
250 the caller will retry qpathinfo as long as inode is null */
251 if (*pinode == NULL)
252 goto posix_open_ret;
253
254 posix_fill_in_inode(*pinode, presp_data, 1);
255 255
256 cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only); 256 cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only);
257 257
@@ -307,8 +307,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
307 307
308 full_path = build_path_from_dentry(direntry); 308 full_path = build_path_from_dentry(direntry);
309 if (full_path == NULL) { 309 if (full_path == NULL) {
310 rc = -ENOMEM;
310 FreeXid(xid); 311 FreeXid(xid);
311 return -ENOMEM; 312 return rc;
312 } 313 }
313 314
314 if (oplockEnabled) 315 if (oplockEnabled)
@@ -424,9 +425,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
424 args.uid = NO_CHANGE_64; 425 args.uid = NO_CHANGE_64;
425 args.gid = NO_CHANGE_64; 426 args.gid = NO_CHANGE_64;
426 } 427 }
427 CIFSSMBUnixSetInfo(xid, tcon, full_path, &args, 428 CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
428 cifs_sb->local_nls, 429 cifs_sb->local_nls,
429 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 430 cifs_sb->mnt_cifs_flags &
431 CIFS_MOUNT_MAP_SPECIAL_CHR);
430 } else { 432 } else {
431 /* BB implement mode setting via Windows security 433 /* BB implement mode setting via Windows security
432 descriptors e.g. */ 434 descriptors e.g. */
@@ -514,10 +516,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
514 args.uid = NO_CHANGE_64; 516 args.uid = NO_CHANGE_64;
515 args.gid = NO_CHANGE_64; 517 args.gid = NO_CHANGE_64;
516 } 518 }
517 rc = CIFSSMBUnixSetInfo(xid, pTcon, full_path, 519 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
518 &args, cifs_sb->local_nls, 520 cifs_sb->local_nls,
519 cifs_sb->mnt_cifs_flags & 521 cifs_sb->mnt_cifs_flags &
520 CIFS_MOUNT_MAP_SPECIAL_CHR); 522 CIFS_MOUNT_MAP_SPECIAL_CHR);
521 523
522 if (!rc) { 524 if (!rc) {
523 rc = cifs_get_inode_info_unix(&newinode, full_path, 525 rc = cifs_get_inode_info_unix(&newinode, full_path,
@@ -540,8 +542,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
540 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 542 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
541 if (buf == NULL) { 543 if (buf == NULL) {
542 kfree(full_path); 544 kfree(full_path);
545 rc = -ENOMEM;
543 FreeXid(xid); 546 FreeXid(xid);
544 return -ENOMEM; 547 return rc;
545 } 548 }
546 549
547 rc = CIFSSMBOpen(xid, pTcon, full_path, 550 rc = CIFSSMBOpen(xid, pTcon, full_path,
@@ -641,6 +644,15 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
641 } 644 }
642 } 645 }
643 646
647 /*
648 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
649 * the VFS handle the create.
650 */
651 if (nd->flags & LOOKUP_EXCL) {
652 d_instantiate(direntry, NULL);
653 return 0;
654 }
655
644 /* can not grab the rename sem here since it would 656 /* can not grab the rename sem here since it would
645 deadlock in the cases (beginning of sys_rename itself) 657 deadlock in the cases (beginning of sys_rename itself)
646 in which we already have the sb rename sem */ 658 in which we already have the sb rename sem */
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index df4a306f697..87948147d7e 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -35,26 +35,11 @@
35 * 0 - name is not IP 35 * 0 - name is not IP
36 */ 36 */
37static int 37static int
38is_ip(const char *name) 38is_ip(char *name)
39{ 39{
40 int rc; 40 struct sockaddr_storage ss;
41 struct sockaddr_in sin_server; 41
42 struct sockaddr_in6 sin_server6; 42 return cifs_convert_address(name, &ss);
43
44 rc = cifs_inet_pton(AF_INET, name,
45 &sin_server.sin_addr.s_addr);
46
47 if (rc <= 0) {
48 /* not ipv4 address, try ipv6 */
49 rc = cifs_inet_pton(AF_INET6, name,
50 &sin_server6.sin6_addr.in6_u);
51 if (rc > 0)
52 return 1;
53 } else {
54 return 1;
55 }
56 /* we failed translating address */
57 return 0;
58} 43}
59 44
60static int 45static int
@@ -72,7 +57,7 @@ dns_resolver_instantiate(struct key *key, const void *data,
72 ip[datalen] = '\0'; 57 ip[datalen] = '\0';
73 58
74 /* make sure this looks like an address */ 59 /* make sure this looks like an address */
75 if (!is_ip((const char *) ip)) { 60 if (!is_ip(ip)) {
76 kfree(ip); 61 kfree(ip);
77 return -EINVAL; 62 return -EINVAL;
78 } 63 }
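
The simplified is_ip() above delegates the IPv4-then-IPv6 probing to the new cifs_convert_address() helper instead of open-coding both attempts. A user-space analog of the same consolidation, built on inet_pton():

#include <stdio.h>
#include <arpa/inet.h>

static int is_ip(const char *name)
{
    unsigned char buf[sizeof(struct in6_addr)];

    return inet_pton(AF_INET, name, buf) == 1 ||
           inet_pton(AF_INET6, name, buf) == 1;
}

int main(void)
{
    printf("%d %d %d\n", is_ip("10.0.0.1"), is_ip("::1"),
           is_ip("host.example.com"));  /* 1 1 0 */
    return 0;
}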
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 06866841b97..c34b7f8a217 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -300,14 +300,16 @@ int cifs_open(struct inode *inode, struct file *file)
300 pCifsInode = CIFS_I(file->f_path.dentry->d_inode); 300 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
301 pCifsFile = cifs_fill_filedata(file); 301 pCifsFile = cifs_fill_filedata(file);
302 if (pCifsFile) { 302 if (pCifsFile) {
303 rc = 0;
303 FreeXid(xid); 304 FreeXid(xid);
304 return 0; 305 return rc;
305 } 306 }
306 307
307 full_path = build_path_from_dentry(file->f_path.dentry); 308 full_path = build_path_from_dentry(file->f_path.dentry);
308 if (full_path == NULL) { 309 if (full_path == NULL) {
310 rc = -ENOMEM;
309 FreeXid(xid); 311 FreeXid(xid);
310 return -ENOMEM; 312 return rc;
311 } 313 }
312 314
313 cFYI(1, ("inode = 0x%p file flags are 0x%x for %s", 315 cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
@@ -446,9 +448,9 @@ int cifs_open(struct inode *inode, struct file *file)
446 .mtime = NO_CHANGE_64, 448 .mtime = NO_CHANGE_64,
447 .device = 0, 449 .device = 0,
448 }; 450 };
449 CIFSSMBUnixSetInfo(xid, tcon, full_path, &args, 451 CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
450 cifs_sb->local_nls, 452 cifs_sb->local_nls,
451 cifs_sb->mnt_cifs_flags & 453 cifs_sb->mnt_cifs_flags &
452 CIFS_MOUNT_MAP_SPECIAL_CHR); 454 CIFS_MOUNT_MAP_SPECIAL_CHR);
453 } 455 }
454 } 456 }
@@ -491,11 +493,12 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
491 return -EBADF; 493 return -EBADF;
492 494
493 xid = GetXid(); 495 xid = GetXid();
494 mutex_unlock(&pCifsFile->fh_mutex); 496 mutex_lock(&pCifsFile->fh_mutex);
495 if (!pCifsFile->invalidHandle) { 497 if (!pCifsFile->invalidHandle) {
496 mutex_lock(&pCifsFile->fh_mutex); 498 mutex_unlock(&pCifsFile->fh_mutex);
499 rc = 0;
497 FreeXid(xid); 500 FreeXid(xid);
498 return 0; 501 return rc;
499 } 502 }
500 503
501 if (file->f_path.dentry == NULL) { 504 if (file->f_path.dentry == NULL) {
@@ -524,7 +527,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
524 if (full_path == NULL) { 527 if (full_path == NULL) {
525 rc = -ENOMEM; 528 rc = -ENOMEM;
526reopen_error_exit: 529reopen_error_exit:
527 mutex_lock(&pCifsFile->fh_mutex); 530 mutex_unlock(&pCifsFile->fh_mutex);
528 FreeXid(xid); 531 FreeXid(xid);
529 return rc; 532 return rc;
530 } 533 }
@@ -566,14 +569,14 @@ reopen_error_exit:
566 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 569 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
567 CIFS_MOUNT_MAP_SPECIAL_CHR); 570 CIFS_MOUNT_MAP_SPECIAL_CHR);
568 if (rc) { 571 if (rc) {
569 mutex_lock(&pCifsFile->fh_mutex); 572 mutex_unlock(&pCifsFile->fh_mutex);
570 cFYI(1, ("cifs_open returned 0x%x", rc)); 573 cFYI(1, ("cifs_open returned 0x%x", rc));
571 cFYI(1, ("oplock: %d", oplock)); 574 cFYI(1, ("oplock: %d", oplock));
572 } else { 575 } else {
573reopen_success: 576reopen_success:
574 pCifsFile->netfid = netfid; 577 pCifsFile->netfid = netfid;
575 pCifsFile->invalidHandle = false; 578 pCifsFile->invalidHandle = false;
576 mutex_lock(&pCifsFile->fh_mutex); 579 mutex_unlock(&pCifsFile->fh_mutex);
577 pCifsInode = CIFS_I(inode); 580 pCifsInode = CIFS_I(inode);
578 if (pCifsInode) { 581 if (pCifsInode) {
579 if (can_flush) { 582 if (can_flush) {
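
The fh_mutex changes in cifs_reopen_file() fix an exact inversion: the old code unlocked the mutex on entry and locked it on every exit. A minimal sketch of the corrected lock-early/unlock-on-every-return shape (pthreads as a stand-in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fh_mutex = PTHREAD_MUTEX_INITIALIZER;
static int invalid_handle = 1;

static int reopen_file(void)
{
    pthread_mutex_lock(&fh_mutex);          /* lock on entry */
    if (!invalid_handle) {
        pthread_mutex_unlock(&fh_mutex);    /* unlock on early exit */
        return 0;
    }
    invalid_handle = 0;                     /* ... reopen work ... */
    pthread_mutex_unlock(&fh_mutex);        /* unlock on success path */
    return 0;
}

int main(void)
{
    printf("rc=%d\n", reopen_file());
    return 0;
}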
@@ -845,8 +848,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
845 tcon = cifs_sb->tcon; 848 tcon = cifs_sb->tcon;
846 849
847 if (file->private_data == NULL) { 850 if (file->private_data == NULL) {
851 rc = -EBADF;
848 FreeXid(xid); 852 FreeXid(xid);
849 return -EBADF; 853 return rc;
850 } 854 }
851 netfid = ((struct cifsFileInfo *)file->private_data)->netfid; 855 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
852 856
@@ -1805,8 +1809,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
1805 pTcon = cifs_sb->tcon; 1809 pTcon = cifs_sb->tcon;
1806 1810
1807 if (file->private_data == NULL) { 1811 if (file->private_data == NULL) {
1812 rc = -EBADF;
1808 FreeXid(xid); 1813 FreeXid(xid);
1809 return -EBADF; 1814 return rc;
1810 } 1815 }
1811 open_file = (struct cifsFileInfo *)file->private_data; 1816 open_file = (struct cifsFileInfo *)file->private_data;
1812 1817
@@ -1885,8 +1890,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1885 pTcon = cifs_sb->tcon; 1890 pTcon = cifs_sb->tcon;
1886 1891
1887 if (file->private_data == NULL) { 1892 if (file->private_data == NULL) {
1893 rc = -EBADF;
1888 FreeXid(xid); 1894 FreeXid(xid);
1889 return -EBADF; 1895 return rc;
1890 } 1896 }
1891 open_file = (struct cifsFileInfo *)file->private_data; 1897 open_file = (struct cifsFileInfo *)file->private_data;
1892 1898
@@ -2019,8 +2025,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2019 2025
2020 xid = GetXid(); 2026 xid = GetXid();
2021 if (file->private_data == NULL) { 2027 if (file->private_data == NULL) {
2028 rc = -EBADF;
2022 FreeXid(xid); 2029 FreeXid(xid);
2023 return -EBADF; 2030 return rc;
2024 } 2031 }
2025 open_file = (struct cifsFileInfo *)file->private_data; 2032 open_file = (struct cifsFileInfo *)file->private_data;
2026 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2033 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -2185,8 +2192,9 @@ static int cifs_readpage(struct file *file, struct page *page)
2185 xid = GetXid(); 2192 xid = GetXid();
2186 2193
2187 if (file->private_data == NULL) { 2194 if (file->private_data == NULL) {
2195 rc = -EBADF;
2188 FreeXid(xid); 2196 FreeXid(xid);
2189 return -EBADF; 2197 return rc;
2190 } 2198 }
2191 2199
2192 cFYI(1, ("readpage %p at offset %d 0x%x\n", 2200 cFYI(1, ("readpage %p at offset %d 0x%x\n",
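
The recurring "rc = -EBADF; FreeXid(xid); return rc;" rewrites in file.c look redundant but are not: the FreeXid() macro (as defined in the CIFS headers of this era, an assumption worth checking) expands to a trace message that reads the caller's local rc, so rc must hold the final value before the macro runs. A toy illustration:

#include <stdio.h>

/* toy FreeXid(): like the real macro, it reads the caller's local rc */
#define FreeXid(curr_xid) \
    printf("leaving %s (xid = %d) rc = %d\n", __func__, (curr_xid), rc)

static int open_like(int bad_handle)
{
    int rc = 0;

    if (bad_handle) {
        rc = -9;        /* -EBADF; must be set before FreeXid() */
        FreeXid(42);    /* logs rc = -9, not a stale 0 */
        return rc;
    }
    FreeXid(42);
    return rc;
}

int main(void)
{
    return open_like(1) == -9 ? 0 : 1;
}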
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index fad882b075b..82d83839655 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -77,239 +77,202 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
77 } 77 }
78} 78}
79 79
80static void cifs_unix_info_to_inode(struct inode *inode, 80/* populate an inode with info from a cifs_fattr struct */
81 FILE_UNIX_BASIC_INFO *info, int force_uid_gid) 81void
82cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
82{ 83{
84 struct cifsInodeInfo *cifs_i = CIFS_I(inode);
83 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 85 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
84 struct cifsInodeInfo *cifsInfo = CIFS_I(inode); 86 unsigned long oldtime = cifs_i->time;
85 __u64 num_of_bytes = le64_to_cpu(info->NumOfBytes); 87
86 __u64 end_of_file = le64_to_cpu(info->EndOfFile); 88 inode->i_atime = fattr->cf_atime;
89 inode->i_mtime = fattr->cf_mtime;
90 inode->i_ctime = fattr->cf_ctime;
91 inode->i_rdev = fattr->cf_rdev;
92 inode->i_nlink = fattr->cf_nlink;
93 inode->i_uid = fattr->cf_uid;
94 inode->i_gid = fattr->cf_gid;
95
96 /* if dynperm is set, don't clobber existing mode */
97 if (inode->i_state & I_NEW ||
98 !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
99 inode->i_mode = fattr->cf_mode;
100
101 cifs_i->cifsAttrs = fattr->cf_cifsattrs;
102 cifs_i->uniqueid = fattr->cf_uniqueid;
103
104 if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
105 cifs_i->time = 0;
106 else
107 cifs_i->time = jiffies;
108
109 cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode,
110 oldtime, cifs_i->time));
87 111
88 inode->i_atime = cifs_NTtimeToUnix(info->LastAccessTime); 112 cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
89 inode->i_mtime = 113
90 cifs_NTtimeToUnix(info->LastModificationTime); 114 /*
91 inode->i_ctime = cifs_NTtimeToUnix(info->LastStatusChange); 115 * Can't safely change the file size here if the client is writing to
92 inode->i_mode = le64_to_cpu(info->Permissions); 116 * it due to potential races.
117 */
118 spin_lock(&inode->i_lock);
119 if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
120 i_size_write(inode, fattr->cf_eof);
121
122 /*
123 * i_blocks is not related to (i_size / i_blksize),
124 * but instead 512 byte (2**9) size is required for
125 * calculating num blocks.
126 */
127 inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
128 }
129 spin_unlock(&inode->i_lock);
130
131 cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL);
132}
133
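
The i_blocks line in cifs_fattr_to_inode() rounds the byte count up to 512-byte units by adding 511 before the shift, exactly as the comment describes. A quick user-space check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t bytes[] = { 0, 1, 512, 513 };
    size_t i;

    for (i = 0; i < sizeof(bytes) / sizeof(bytes[0]); i++)
        printf("%llu bytes -> %llu 512-byte blocks\n",
               (unsigned long long)bytes[i],
               (unsigned long long)((512 - 1 + bytes[i]) >> 9));
    return 0;  /* prints 0, 1, 1, 2 blocks respectively */
}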
134/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
135void
136cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
137 struct cifs_sb_info *cifs_sb)
138{
139 memset(fattr, 0, sizeof(*fattr));
140 fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
141 fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
142 fattr->cf_eof = le64_to_cpu(info->EndOfFile);
143
144 fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
145 fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
146 fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
147 fattr->cf_mode = le64_to_cpu(info->Permissions);
93 148
94 /* 149 /*
95 * Since we set the inode type below we need to mask off 150 * Since we set the inode type below we need to mask off
96 * to avoid strange results if bits set above. 151 * to avoid strange results if bits set above.
97 */ 152 */
98 inode->i_mode &= ~S_IFMT; 153 fattr->cf_mode &= ~S_IFMT;
99 switch (le32_to_cpu(info->Type)) { 154 switch (le32_to_cpu(info->Type)) {
100 case UNIX_FILE: 155 case UNIX_FILE:
101 inode->i_mode |= S_IFREG; 156 fattr->cf_mode |= S_IFREG;
157 fattr->cf_dtype = DT_REG;
102 break; 158 break;
103 case UNIX_SYMLINK: 159 case UNIX_SYMLINK:
104 inode->i_mode |= S_IFLNK; 160 fattr->cf_mode |= S_IFLNK;
161 fattr->cf_dtype = DT_LNK;
105 break; 162 break;
106 case UNIX_DIR: 163 case UNIX_DIR:
107 inode->i_mode |= S_IFDIR; 164 fattr->cf_mode |= S_IFDIR;
165 fattr->cf_dtype = DT_DIR;
108 break; 166 break;
109 case UNIX_CHARDEV: 167 case UNIX_CHARDEV:
110 inode->i_mode |= S_IFCHR; 168 fattr->cf_mode |= S_IFCHR;
111 inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor), 169 fattr->cf_dtype = DT_CHR;
112 le64_to_cpu(info->DevMinor) & MINORMASK); 170 fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
171 le64_to_cpu(info->DevMinor) & MINORMASK);
113 break; 172 break;
114 case UNIX_BLOCKDEV: 173 case UNIX_BLOCKDEV:
115 inode->i_mode |= S_IFBLK; 174 fattr->cf_mode |= S_IFBLK;
116 inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor), 175 fattr->cf_dtype = DT_BLK;
117 le64_to_cpu(info->DevMinor) & MINORMASK); 176 fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
177 le64_to_cpu(info->DevMinor) & MINORMASK);
118 break; 178 break;
119 case UNIX_FIFO: 179 case UNIX_FIFO:
120 inode->i_mode |= S_IFIFO; 180 fattr->cf_mode |= S_IFIFO;
181 fattr->cf_dtype = DT_FIFO;
121 break; 182 break;
122 case UNIX_SOCKET: 183 case UNIX_SOCKET:
123 inode->i_mode |= S_IFSOCK; 184 fattr->cf_mode |= S_IFSOCK;
185 fattr->cf_dtype = DT_SOCK;
124 break; 186 break;
125 default: 187 default:
126 /* safest to call it a file if we do not know */ 188 /* safest to call it a file if we do not know */
127 inode->i_mode |= S_IFREG; 189 fattr->cf_mode |= S_IFREG;
190 fattr->cf_dtype = DT_REG;
128 cFYI(1, ("unknown type %d", le32_to_cpu(info->Type))); 191 cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
129 break; 192 break;
130 } 193 }
131 194
132 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) && 195 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
133 !force_uid_gid) 196 fattr->cf_uid = cifs_sb->mnt_uid;
134 inode->i_uid = cifs_sb->mnt_uid;
135 else 197 else
136 inode->i_uid = le64_to_cpu(info->Uid); 198 fattr->cf_uid = le64_to_cpu(info->Uid);
137 199
138 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) && 200 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
139 !force_uid_gid) 201 fattr->cf_gid = cifs_sb->mnt_gid;
140 inode->i_gid = cifs_sb->mnt_gid;
141 else 202 else
142 inode->i_gid = le64_to_cpu(info->Gid); 203 fattr->cf_gid = le64_to_cpu(info->Gid);
143
144 inode->i_nlink = le64_to_cpu(info->Nlinks);
145
146 cifsInfo->server_eof = end_of_file;
147 spin_lock(&inode->i_lock);
148 if (is_size_safe_to_change(cifsInfo, end_of_file)) {
149 /*
150 * We can not safely change the file size here if the client
151 * is writing to it due to potential races.
152 */
153 i_size_write(inode, end_of_file);
154 204
155 /* 205 fattr->cf_nlink = le64_to_cpu(info->Nlinks);
156 * i_blocks is not related to (i_size / i_blksize),
157 * but instead 512 byte (2**9) size is required for
158 * calculating num blocks.
159 */
160 inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
161 }
162 spin_unlock(&inode->i_lock);
163} 206}
164 207
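
The device branches above take the two 64-bit DevMajor/DevMinor wire values and pack them into a dev_t, masking the minor first. A user-space sketch with makedev() standing in for the kernel's MKDEV (the 20-bit MINORMASK value is assumed from the kernel headers):

#include <stdio.h>
#include <stdint.h>
#include <sys/sysmacros.h>

int main(void)
{
    uint64_t dev_major = 8, dev_minor = 1;  /* wire values, e.g. sda1 */
    unsigned minormask = (1U << 20) - 1;    /* kernel MINORMASK */
    dev_t rdev = makedev((unsigned)dev_major,
                         (unsigned)dev_minor & minormask);

    printf("major=%u minor=%u\n", major(rdev), minor(rdev));
    return 0;
}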
165
166/* 208/*
167 * Needed to setup inode data for the directory which is the 209 * Fill a cifs_fattr struct with fake inode info.
168 * junction to the new submount (ie to setup the fake directory
169 * which represents a DFS referral)
170 */
171static void fill_fake_finddataunix(FILE_UNIX_BASIC_INFO *pfnd_dat,
172 struct super_block *sb)
173{
174 struct inode *pinode = NULL;
175
176 memset(pfnd_dat, 0, sizeof(FILE_UNIX_BASIC_INFO));
177
178/* __le64 pfnd_dat->EndOfFile = cpu_to_le64(0);
179 __le64 pfnd_dat->NumOfBytes = cpu_to_le64(0);
180 __u64 UniqueId = 0; */
181 pfnd_dat->LastStatusChange =
182 cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
183 pfnd_dat->LastAccessTime =
184 cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
185 pfnd_dat->LastModificationTime =
186 cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
187 pfnd_dat->Type = cpu_to_le32(UNIX_DIR);
188 pfnd_dat->Permissions = cpu_to_le64(S_IXUGO | S_IRWXU);
189 pfnd_dat->Nlinks = cpu_to_le64(2);
190 if (sb->s_root)
191 pinode = sb->s_root->d_inode;
192 if (pinode == NULL)
193 return;
194
195 /* fill in default values for the remaining based on root
196 inode since we can not query the server for this inode info */
197 pfnd_dat->DevMajor = cpu_to_le64(MAJOR(pinode->i_rdev));
198 pfnd_dat->DevMinor = cpu_to_le64(MINOR(pinode->i_rdev));
199 pfnd_dat->Uid = cpu_to_le64(pinode->i_uid);
200 pfnd_dat->Gid = cpu_to_le64(pinode->i_gid);
201}
202
203/**
204 * cifs_new inode - create new inode, initialize, and hash it
205 * @sb - pointer to superblock
206 * @inum - if valid pointer and serverino is enabled, replace i_ino with val
207 *
208 * Create a new inode, initialize it for CIFS and hash it. Returns the new
209 * inode or NULL if one couldn't be allocated.
210 * 210 *
211 * If the share isn't mounted with "serverino" or inum is a NULL pointer then 211 * Needed to setup cifs_fattr data for the directory which is the
212 * we'll just use the inode number assigned by new_inode(). Note that this can 212 * junction to the new submount (ie to setup the fake directory
213 * mean i_ino collisions since the i_ino assigned by new_inode is not 213 * which represents a DFS referral).
214 * guaranteed to be unique.
215 */ 214 */
216struct inode * 215static void
217cifs_new_inode(struct super_block *sb, __u64 *inum) 216cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
218{ 217{
219 struct inode *inode; 218 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
220
221 inode = new_inode(sb);
222 if (inode == NULL)
223 return NULL;
224
225 /*
226 * BB: Is i_ino == 0 legal? Here, we assume that it is. If it isn't we
227 * stop passing inum as ptr. Are there sanity checks we can use to
228 * ensure that the server is really filling in that field? Also,
229 * if serverino is disabled, perhaps we should be using iunique()?
230 */
231 if (inum && (CIFS_SB(sb)->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
232 inode->i_ino = (unsigned long) *inum;
233
234 /*
235 * must set this here instead of cifs_alloc_inode since VFS will
236 * clobber i_flags
237 */
238 if (sb->s_flags & MS_NOATIME)
239 inode->i_flags |= S_NOATIME | S_NOCMTIME;
240
241 insert_inode_hash(inode);
242 219
243 return inode; 220 cFYI(1, ("creating fake fattr for DFS referral"));
221
222 memset(fattr, 0, sizeof(*fattr));
223 fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
224 fattr->cf_uid = cifs_sb->mnt_uid;
225 fattr->cf_gid = cifs_sb->mnt_gid;
226 fattr->cf_atime = CURRENT_TIME;
227 fattr->cf_ctime = CURRENT_TIME;
228 fattr->cf_mtime = CURRENT_TIME;
229 fattr->cf_nlink = 2;
230 fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
244} 231}
245 232
246int cifs_get_inode_info_unix(struct inode **pinode, 233int cifs_get_inode_info_unix(struct inode **pinode,
247 const unsigned char *full_path, struct super_block *sb, int xid) 234 const unsigned char *full_path,
235 struct super_block *sb, int xid)
248{ 236{
249 int rc = 0; 237 int rc;
250 FILE_UNIX_BASIC_INFO find_data; 238 FILE_UNIX_BASIC_INFO find_data;
251 struct cifsTconInfo *pTcon; 239 struct cifs_fattr fattr;
252 struct inode *inode; 240 struct cifsTconInfo *tcon;
253 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 241 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
254 bool is_dfs_referral = false;
255 struct cifsInodeInfo *cifsInfo;
256 __u64 num_of_bytes;
257 __u64 end_of_file;
258 242
259 pTcon = cifs_sb->tcon; 243 tcon = cifs_sb->tcon;
260 cFYI(1, ("Getting info on %s", full_path)); 244 cFYI(1, ("Getting info on %s", full_path));
261 245
262 /* could have done a find first instead but this returns more info */ 246 /* could have done a find first instead but this returns more info */
263 rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &find_data, 247 rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
264 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 248 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
265 CIFS_MOUNT_MAP_SPECIAL_CHR); 249 CIFS_MOUNT_MAP_SPECIAL_CHR);
266 if (rc == -EREMOTE && !is_dfs_referral) {
267 is_dfs_referral = true;
268 cFYI(DBG2, ("DFS ref"));
269 /* for DFS, server does not give us real inode data */
270 fill_fake_finddataunix(&find_data, sb);
271 rc = 0;
272 } else if (rc)
273 goto cgiiu_exit;
274 250
275 num_of_bytes = le64_to_cpu(find_data.NumOfBytes); 251 if (!rc) {
276 end_of_file = le64_to_cpu(find_data.EndOfFile); 252 cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
253 } else if (rc == -EREMOTE) {
254 cifs_create_dfs_fattr(&fattr, sb);
255 rc = 0;
256 } else {
257 return rc;
258 }
277 259
278 /* get new inode */
279 if (*pinode == NULL) { 260 if (*pinode == NULL) {
280 __u64 unique_id = le64_to_cpu(find_data.UniqueId); 261 /* get new inode */
281 *pinode = cifs_new_inode(sb, &unique_id); 262 *pinode = cifs_iget(sb, &fattr);
282 if (*pinode == NULL) { 263 if (!*pinode)
283 rc = -ENOMEM; 264 rc = -ENOMEM;
284 goto cgiiu_exit; 265 } else {
285 } 266 /* we already have inode, update it */
267 cifs_fattr_to_inode(*pinode, &fattr);
286 } 268 }
287 269
288 inode = *pinode;
289 cifsInfo = CIFS_I(inode);
290
291 cFYI(1, ("Old time %ld", cifsInfo->time));
292 cifsInfo->time = jiffies;
293 cFYI(1, ("New time %ld", cifsInfo->time));
294 /* this is ok to set on every inode revalidate */
295 atomic_set(&cifsInfo->inUse, 1);
296
297 cifs_unix_info_to_inode(inode, &find_data, 0);
298
299 if (num_of_bytes < end_of_file)
300 cFYI(1, ("allocation size less than end of file"));
301 cFYI(1, ("Size %ld and blocks %llu",
302 (unsigned long) inode->i_size,
303 (unsigned long long)inode->i_blocks));
304
305 cifs_set_ops(inode, is_dfs_referral);
306cgiiu_exit:
307 return rc; 270 return rc;
308} 271}
309 272
310static int decode_sfu_inode(struct inode *inode, __u64 size, 273static int
311 const unsigned char *path, 274cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
312 struct cifs_sb_info *cifs_sb, int xid) 275 struct cifs_sb_info *cifs_sb, int xid)
313{ 276{
314 int rc; 277 int rc;
315 int oplock = 0; 278 int oplock = 0;
@@ -321,10 +284,15 @@ static int decode_sfu_inode(struct inode *inode, __u64 size,
321 284
322 pbuf = buf; 285 pbuf = buf;
323 286
324 if (size == 0) { 287 fattr->cf_mode &= ~S_IFMT;
325 inode->i_mode |= S_IFIFO; 288
289 if (fattr->cf_eof == 0) {
290 fattr->cf_mode |= S_IFIFO;
291 fattr->cf_dtype = DT_FIFO;
326 return 0; 292 return 0;
327 } else if (size < 8) { 293 } else if (fattr->cf_eof < 8) {
294 fattr->cf_mode |= S_IFREG;
295 fattr->cf_dtype = DT_REG;
328 return -EINVAL; /* EOPNOTSUPP? */ 296 return -EINVAL; /* EOPNOTSUPP? */
329 } 297 }
330 298
@@ -336,42 +304,46 @@ static int decode_sfu_inode(struct inode *inode, __u64 size,
336 if (rc == 0) { 304 if (rc == 0) {
337 int buf_type = CIFS_NO_BUFFER; 305 int buf_type = CIFS_NO_BUFFER;
338 /* Read header */ 306 /* Read header */
339 rc = CIFSSMBRead(xid, pTcon, 307 rc = CIFSSMBRead(xid, pTcon, netfid,
340 netfid,
341 24 /* length */, 0 /* offset */, 308 24 /* length */, 0 /* offset */,
342 &bytes_read, &pbuf, &buf_type); 309 &bytes_read, &pbuf, &buf_type);
343 if ((rc == 0) && (bytes_read >= 8)) { 310 if ((rc == 0) && (bytes_read >= 8)) {
344 if (memcmp("IntxBLK", pbuf, 8) == 0) { 311 if (memcmp("IntxBLK", pbuf, 8) == 0) {
345 cFYI(1, ("Block device")); 312 cFYI(1, ("Block device"));
346 inode->i_mode |= S_IFBLK; 313 fattr->cf_mode |= S_IFBLK;
314 fattr->cf_dtype = DT_BLK;
347 if (bytes_read == 24) { 315 if (bytes_read == 24) {
348 /* we have enough to decode dev num */ 316 /* we have enough to decode dev num */
349 __u64 mjr; /* major */ 317 __u64 mjr; /* major */
350 __u64 mnr; /* minor */ 318 __u64 mnr; /* minor */
351 mjr = le64_to_cpu(*(__le64 *)(pbuf+8)); 319 mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
352 mnr = le64_to_cpu(*(__le64 *)(pbuf+16)); 320 mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
353 inode->i_rdev = MKDEV(mjr, mnr); 321 fattr->cf_rdev = MKDEV(mjr, mnr);
354 } 322 }
355 } else if (memcmp("IntxCHR", pbuf, 8) == 0) { 323 } else if (memcmp("IntxCHR", pbuf, 8) == 0) {
356 cFYI(1, ("Char device")); 324 cFYI(1, ("Char device"));
357 inode->i_mode |= S_IFCHR; 325 fattr->cf_mode |= S_IFCHR;
326 fattr->cf_dtype = DT_CHR;
358 if (bytes_read == 24) { 327 if (bytes_read == 24) {
359 /* we have enough to decode dev num */ 328 /* we have enough to decode dev num */
360 __u64 mjr; /* major */ 329 __u64 mjr; /* major */
361 __u64 mnr; /* minor */ 330 __u64 mnr; /* minor */
362 mjr = le64_to_cpu(*(__le64 *)(pbuf+8)); 331 mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
363 mnr = le64_to_cpu(*(__le64 *)(pbuf+16)); 332 mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
364 inode->i_rdev = MKDEV(mjr, mnr); 333 fattr->cf_rdev = MKDEV(mjr, mnr);
365 } 334 }
366 } else if (memcmp("IntxLNK", pbuf, 7) == 0) { 335 } else if (memcmp("IntxLNK", pbuf, 7) == 0) {
367 cFYI(1, ("Symlink")); 336 cFYI(1, ("Symlink"));
368 inode->i_mode |= S_IFLNK; 337 fattr->cf_mode |= S_IFLNK;
338 fattr->cf_dtype = DT_LNK;
369 } else { 339 } else {
370 inode->i_mode |= S_IFREG; /* file? */ 340 fattr->cf_mode |= S_IFREG; /* file? */
341 fattr->cf_dtype = DT_REG;
371 rc = -EOPNOTSUPP; 342 rc = -EOPNOTSUPP;
372 } 343 }
373 } else { 344 } else {
374 inode->i_mode |= S_IFREG; /* then it is a file */ 345 fattr->cf_mode |= S_IFREG; /* then it is a file */
346 fattr->cf_dtype = DT_REG;
375 rc = -EOPNOTSUPP; /* or some unknown SFU type */ 347 rc = -EOPNOTSUPP; /* or some unknown SFU type */
376 } 348 }
377 CIFSSMBClose(xid, pTcon, netfid); 349 CIFSSMBClose(xid, pTcon, netfid);
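
cifs_sfu_type() sniffs the Services-for-Unix convention: a special file is stored as a regular file whose first bytes name the type, optionally followed by two little-endian 64-bit device numbers. A user-space sketch of the magic-header decode (lengths chosen to match the memcmp calls above; the device-number parsing is omitted):

#include <stdio.h>
#include <string.h>

static const char *sfu_type(const unsigned char *hdr, size_t len)
{
    if (len >= 8 && !memcmp(hdr, "IntxBLK", 8))  /* 8 bytes incl. NUL */
        return "block device";
    if (len >= 8 && !memcmp(hdr, "IntxCHR", 8))
        return "char device";
    if (len >= 7 && !memcmp(hdr, "IntxLNK", 7))  /* only 7 bytes compared */
        return "symlink";
    return "regular file";  /* unknown magic: treat as plain file */
}

int main(void)
{
    unsigned char hdr[24] = "IntxBLK";  /* remaining bytes zeroed */

    printf("%s\n", sfu_type(hdr, sizeof(hdr)));
    return 0;
}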
@@ -381,9 +353,13 @@ static int decode_sfu_inode(struct inode *inode, __u64 size,
381 353
382#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID) /* SETFILEBITS valid bits */ 354#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID) /* SETFILEBITS valid bits */
383 355
384static int get_sfu_mode(struct inode *inode, 356/*
385 const unsigned char *path, 357 * Fetch mode bits as provided by SFU.
386 struct cifs_sb_info *cifs_sb, int xid) 358 *
359 * FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type?
360 */
361static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
362 struct cifs_sb_info *cifs_sb, int xid)
387{ 363{
388#ifdef CONFIG_CIFS_XATTR 364#ifdef CONFIG_CIFS_XATTR
389 ssize_t rc; 365 ssize_t rc;
@@ -391,68 +367,80 @@ static int get_sfu_mode(struct inode *inode,
391 __u32 mode; 367 __u32 mode;
392 368
393 rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS", 369 rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS",
394 ea_value, 4 /* size of buf */, cifs_sb->local_nls, 370 ea_value, 4 /* size of buf */, cifs_sb->local_nls,
395 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 371 cifs_sb->mnt_cifs_flags &
372 CIFS_MOUNT_MAP_SPECIAL_CHR);
396 if (rc < 0) 373 if (rc < 0)
397 return (int)rc; 374 return (int)rc;
398 else if (rc > 3) { 375 else if (rc > 3) {
399 mode = le32_to_cpu(*((__le32 *)ea_value)); 376 mode = le32_to_cpu(*((__le32 *)ea_value));
400 inode->i_mode &= ~SFBITS_MASK; 377 fattr->cf_mode &= ~SFBITS_MASK;
401 cFYI(1, ("special bits 0%o org mode 0%o", mode, inode->i_mode)); 378 cFYI(1, ("special bits 0%o org mode 0%o", mode,
402 inode->i_mode = (mode & SFBITS_MASK) | inode->i_mode; 379 fattr->cf_mode));
380 fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
403 cFYI(1, ("special mode bits 0%o", mode)); 381 cFYI(1, ("special mode bits 0%o", mode));
404 return 0;
405 } else {
406 return 0;
407 } 382 }
383
384 return 0;
408#else 385#else
409 return -EOPNOTSUPP; 386 return -EOPNOTSUPP;
410#endif 387#endif
411} 388}
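
cifs_sfu_mode() merges only the setuid/setgid/sticky bits from the SETFILEBITS EA into the mode, leaving everything else intact, via the SFBITS_MASK defined above. A user-space sketch of that masked merge:

#include <stdio.h>
#include <sys/stat.h>

#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID)

int main(void)
{
    unsigned ea_mode = S_ISUID | 0777;  /* bits stored in the EA */
    unsigned cf_mode = S_IFREG | 0644;  /* mode computed so far */

    cf_mode &= ~SFBITS_MASK;
    cf_mode |= ea_mode & SFBITS_MASK;   /* take only suid/sgid/sticky */
    printf("mode=0%o\n", cf_mode);      /* 0104644 */
    return 0;
}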
412 389
413/* 390/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
414 * Needed to setup inode data for the directory which is the 391static void
415 * junction to the new submount (ie to setup the fake directory 392cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
416 * which represents a DFS referral) 393 struct cifs_sb_info *cifs_sb, bool adjust_tz)
417 */
418static void fill_fake_finddata(FILE_ALL_INFO *pfnd_dat,
419 struct super_block *sb)
420{ 394{
421 memset(pfnd_dat, 0, sizeof(FILE_ALL_INFO)); 395 memset(fattr, 0, sizeof(*fattr));
422 396 fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
423/* __le64 pfnd_dat->AllocationSize = cpu_to_le64(0); 397 if (info->DeletePending)
424 __le64 pfnd_dat->EndOfFile = cpu_to_le64(0); 398 fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING;
425 __u8 pfnd_dat->DeletePending = 0; 399
426 __u8 pfnd_data->Directory = 0; 400 if (info->LastAccessTime)
427 __le32 pfnd_dat->EASize = 0; 401 fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
428 __u64 pfnd_dat->IndexNumber = 0; 402 else
429 __u64 pfnd_dat->IndexNumber1 = 0; */ 403 fattr->cf_atime = CURRENT_TIME;
430 pfnd_dat->CreationTime = 404
431 cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 405 fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
432 pfnd_dat->LastAccessTime = 406 fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
433 cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 407
434 pfnd_dat->LastWriteTime = 408 if (adjust_tz) {
435 cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 409 fattr->cf_ctime.tv_sec += cifs_sb->tcon->ses->server->timeAdj;
436 pfnd_dat->ChangeTime = 410 fattr->cf_mtime.tv_sec += cifs_sb->tcon->ses->server->timeAdj;
437 cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 411 }
438 pfnd_dat->Attributes = cpu_to_le32(ATTR_DIRECTORY); 412
439 pfnd_dat->NumberOfLinks = cpu_to_le32(2); 413 fattr->cf_eof = le64_to_cpu(info->EndOfFile);
414 fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
415
416 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
417 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
418 fattr->cf_dtype = DT_DIR;
419 } else {
420 fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
421 fattr->cf_dtype = DT_REG;
422
423 /* clear write bits if ATTR_READONLY is set */
424 if (fattr->cf_cifsattrs & ATTR_READONLY)
425 fattr->cf_mode &= ~(S_IWUGO);
426 }
427
428 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
429
430 fattr->cf_uid = cifs_sb->mnt_uid;
431 fattr->cf_gid = cifs_sb->mnt_gid;
440} 432}
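
The attribute-to-mode logic in cifs_all_info_to_fattr() gives directories the mount's dir mode, files the file mode, and strips the write bits when the DOS read-only attribute is set. A user-space sketch of the mapping (the ATTR_* values are the usual SMB attribute bits, assumed from cifspdu.h):

#include <stdio.h>
#include <sys/stat.h>

#define ATTR_READONLY  0x0001
#define ATTR_DIRECTORY 0x0010

static mode_t attrs_to_mode(unsigned attrs, mode_t dir_mode, mode_t file_mode)
{
    mode_t mode;

    if (attrs & ATTR_DIRECTORY)
        return S_IFDIR | dir_mode;
    mode = S_IFREG | file_mode;
    if (attrs & ATTR_READONLY)
        mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);  /* ~S_IWUGO */
    return mode;
}

int main(void)
{
    printf("0%o\n", attrs_to_mode(ATTR_READONLY, 0755, 0644)); /* 0100444 */
    return 0;
}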
441 433
442int cifs_get_inode_info(struct inode **pinode, 434int cifs_get_inode_info(struct inode **pinode,
443 const unsigned char *full_path, FILE_ALL_INFO *pfindData, 435 const unsigned char *full_path, FILE_ALL_INFO *pfindData,
444 struct super_block *sb, int xid, const __u16 *pfid) 436 struct super_block *sb, int xid, const __u16 *pfid)
445{ 437{
446 int rc = 0; 438 int rc = 0, tmprc;
447 __u32 attr;
448 struct cifsInodeInfo *cifsInfo;
449 struct cifsTconInfo *pTcon; 439 struct cifsTconInfo *pTcon;
450 struct inode *inode;
451 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 440 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
452 char *buf = NULL; 441 char *buf = NULL;
453 bool adjustTZ = false; 442 bool adjustTZ = false;
454 bool is_dfs_referral = false; 443 struct cifs_fattr fattr;
455 umode_t default_mode;
456 444
457 pTcon = cifs_sb->tcon; 445 pTcon = cifs_sb->tcon;
458 cFYI(1, ("Getting info on %s", full_path)); 446 cFYI(1, ("Getting info on %s", full_path));
@@ -487,163 +475,85 @@ int cifs_get_inode_info(struct inode **pinode,
487 adjustTZ = true; 475 adjustTZ = true;
488 } 476 }
489 } 477 }
490 /* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */ 478
491 if (rc == -EREMOTE) { 479 if (!rc) {
492 is_dfs_referral = true; 480 cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *) pfindData,
493 fill_fake_finddata(pfindData, sb); 481 cifs_sb, adjustTZ);
482 } else if (rc == -EREMOTE) {
483 cifs_create_dfs_fattr(&fattr, sb);
494 rc = 0; 484 rc = 0;
495 } else if (rc) 485 } else {
496 goto cgii_exit; 486 goto cgii_exit;
487 }
497 488
498 attr = le32_to_cpu(pfindData->Attributes); 489 /*
499 490 * If an inode wasn't passed in, then get the inode number
500 /* get new inode */ 491 *
492 * Is an i_ino of zero legal? Can we use that to check if the server
493 * supports returning inode numbers? Are there other sanity checks we
494 * can use to ensure that the server is really filling in that field?
495 *
496 * We can not use the IndexNumber field by default from Windows or
497 * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
498 * CIFS spec claims that this value is unique within the scope of a
499 * share, and the windows docs hint that it's actually unique
500 * per-machine.
501 *
502 * There may be higher info levels that work but are there Windows
503 * server or network appliances for which IndexNumber field is not
504 * guaranteed unique?
505 */
501 if (*pinode == NULL) { 506 if (*pinode == NULL) {
502 __u64 inode_num;
503 __u64 *pinum = &inode_num;
504
505 /* Is an i_ino of zero legal? Can we use that to check
506 if the server supports returning inode numbers? Are
507 there other sanity checks we can use to ensure that
508 the server is really filling in that field? */
509
510 /* We can not use the IndexNumber field by default from
511 Windows or Samba (in ALL_INFO buf) but we can request
512 it explicitly. It may not be unique presumably if
513 the server has multiple devices mounted under one share */
514
515 /* There may be higher info levels that work but are
516 there Windows server or network appliances for which
517 IndexNumber field is not guaranteed unique? */
518
519 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 507 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
520 int rc1 = 0; 508 int rc1 = 0;
521 509
522 rc1 = CIFSGetSrvInodeNumber(xid, pTcon, 510 rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
523 full_path, pinum, 511 full_path, &fattr.cf_uniqueid,
524 cifs_sb->local_nls, 512 cifs_sb->local_nls,
525 cifs_sb->mnt_cifs_flags & 513 cifs_sb->mnt_cifs_flags &
526 CIFS_MOUNT_MAP_SPECIAL_CHR); 514 CIFS_MOUNT_MAP_SPECIAL_CHR);
527 if (rc1) { 515 if (rc1) {
528 cFYI(1, ("GetSrvInodeNum rc %d", rc1)); 516 cFYI(1, ("GetSrvInodeNum rc %d", rc1));
529 pinum = NULL; 517 fattr.cf_uniqueid = iunique(sb, ROOT_I);
530 /* BB EOPNOSUPP disable SERVER_INUM? */ 518 /* disable serverino if call not supported */
519 if (rc1 == -EINVAL)
520 cifs_sb->mnt_cifs_flags &=
521 ~CIFS_MOUNT_SERVER_INUM;
531 } 522 }
532 } else { 523 } else {
533 pinum = NULL; 524 fattr.cf_uniqueid = iunique(sb, ROOT_I);
534 }
535
536 *pinode = cifs_new_inode(sb, pinum);
537 if (*pinode == NULL) {
538 rc = -ENOMEM;
539 goto cgii_exit;
540 } 525 }
541 }
542 inode = *pinode;
543 cifsInfo = CIFS_I(inode);
544 cifsInfo->cifsAttrs = attr;
545 cifsInfo->delete_pending = pfindData->DeletePending ? true : false;
546 cFYI(1, ("Old time %ld", cifsInfo->time));
547 cifsInfo->time = jiffies;
548 cFYI(1, ("New time %ld", cifsInfo->time));
549
550 /* blksize needs to be multiple of two. So safer to default to
551 blksize and blkbits set in superblock so 2**blkbits and blksize
552 will match rather than setting to:
553 (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
554
555 /* Linux can not store file creation time so ignore it */
556 if (pfindData->LastAccessTime)
557 inode->i_atime = cifs_NTtimeToUnix(pfindData->LastAccessTime);
558 else /* do not need to use current_fs_time - time not stored */
559 inode->i_atime = CURRENT_TIME;
560 inode->i_mtime = cifs_NTtimeToUnix(pfindData->LastWriteTime);
561 inode->i_ctime = cifs_NTtimeToUnix(pfindData->ChangeTime);
562 cFYI(DBG2, ("Attributes came in as 0x%x", attr));
563 if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
564 inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
565 inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
566 }
567
568 /* get default inode mode */
569 if (attr & ATTR_DIRECTORY)
570 default_mode = cifs_sb->mnt_dir_mode;
571 else
572 default_mode = cifs_sb->mnt_file_mode;
573
574 /* set permission bits */
575 if (atomic_read(&cifsInfo->inUse) == 0 ||
576 (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
577 inode->i_mode = default_mode;
578 else {
579 /* just reenable write bits if !ATTR_READONLY */
580 if ((inode->i_mode & S_IWUGO) == 0 &&
581 (attr & ATTR_READONLY) == 0)
582 inode->i_mode |= (S_IWUGO & default_mode);
583
584 inode->i_mode &= ~S_IFMT;
585 }
586 /* clear write bits if ATTR_READONLY is set */
587 if (attr & ATTR_READONLY)
588 inode->i_mode &= ~S_IWUGO;
589
590 /* set inode type */
591 if ((attr & ATTR_SYSTEM) &&
592 (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) {
593 /* no need to fix endianness on 0 */
594 if (pfindData->EndOfFile == 0)
595 inode->i_mode |= S_IFIFO;
596 else if (decode_sfu_inode(inode,
597 le64_to_cpu(pfindData->EndOfFile),
598 full_path, cifs_sb, xid))
599 cFYI(1, ("unknown SFU file type\n"));
600 } else { 526 } else {
601 if (attr & ATTR_DIRECTORY) 527 fattr.cf_uniqueid = CIFS_I(*pinode)->uniqueid;
602 inode->i_mode |= S_IFDIR;
603 else
604 inode->i_mode |= S_IFREG;
605 } 528 }
606 529
607 cifsInfo->server_eof = le64_to_cpu(pfindData->EndOfFile); 530 /* query for SFU type info if supported and needed */
608 spin_lock(&inode->i_lock); 531 if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
609 if (is_size_safe_to_change(cifsInfo, cifsInfo->server_eof)) { 532 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
610 /* can not safely shrink the file size here if the 533 tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
611 client is writing to it due to potential races */ 534 if (tmprc)
612 i_size_write(inode, cifsInfo->server_eof); 535 cFYI(1, ("cifs_sfu_type failed: %d", tmprc));
613
614 /* 512 bytes (2**9) is the fake blocksize that must be
615 used for this calculation */
616 inode->i_blocks = (512 - 1 + le64_to_cpu(
617 pfindData->AllocationSize)) >> 9;
618 } 536 }
619 spin_unlock(&inode->i_lock);
620 537
621 inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
622
623 /* BB fill in uid and gid here? with help from winbind?
624 or retrieve from NTFS stream extended attribute */
625#ifdef CONFIG_CIFS_EXPERIMENTAL 538#ifdef CONFIG_CIFS_EXPERIMENTAL
626 /* fill in 0777 bits from ACL */ 539 /* fill in 0777 bits from ACL */
627 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 540 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
628 cFYI(1, ("Getting mode bits from ACL")); 541 cFYI(1, ("Getting mode bits from ACL"));
629 acl_to_uid_mode(cifs_sb, inode, full_path, pfid); 542 cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid);
630 } 543 }
631#endif 544#endif
632 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
633 /* fill in remaining high mode bits e.g. SUID, VTX */
634 get_sfu_mode(inode, full_path, cifs_sb, xid);
635 } else if (atomic_read(&cifsInfo->inUse) == 0) {
636 inode->i_uid = cifs_sb->mnt_uid;
637 inode->i_gid = cifs_sb->mnt_gid;
638 /* set so we do not keep refreshing these fields with
639 bad data after user has changed them in memory */
640 atomic_set(&cifsInfo->inUse, 1);
641 }
642
643 cifs_set_ops(inode, is_dfs_referral);
644
645 545
546 /* fill in remaining high mode bits e.g. SUID, VTX */
547 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
548 cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);
646 549
550 if (!*pinode) {
551 *pinode = cifs_iget(sb, &fattr);
552 if (!*pinode)
553 rc = -ENOMEM;
554 } else {
555 cifs_fattr_to_inode(*pinode, &fattr);
556 }
647 557
648cgii_exit: 558cgii_exit:
649 kfree(buf); 559 kfree(buf);
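
The serverino logic above boils down to a simple policy: prefer the server-supplied IndexNumber, fall back to a locally generated id when the query fails, and stop asking once the server reports -EINVAL. A minimal userspace sketch of that policy — not the kernel code itself; get_server_inum() and the counter standing in for iunique() are hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

static bool use_server_inum = true;   /* mirrors CIFS_MOUNT_SERVER_INUM */
static uint64_t next_local_id = 2;    /* stand-in for iunique(sb, ROOT_I) */

/* hypothetical stub: a server that does not support the query */
static int get_server_inum(const char *path, uint64_t *out)
{
	(void)path;
	(void)out;
	return -EINVAL;
}

static uint64_t pick_uniqueid(const char *path)
{
	uint64_t id;

	if (use_server_inum) {
		int rc = get_server_inum(path, &id);
		if (rc == 0)
			return id;                /* server's IndexNumber */
		if (rc == -EINVAL)                /* call not supported */
			use_server_inum = false;  /* disable it, as the patch does */
	}
	return next_local_id++;                   /* locally generated fallback */
}

int main(void)
{
	printf("id(a)=%llu id(b)=%llu\n",
	       (unsigned long long)pick_uniqueid("/a"),
	       (unsigned long long)pick_uniqueid("/b"));
	return 0;
}
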
@@ -695,33 +605,78 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
695 return full_path; 605 return full_path;
696} 606}
697 607
608static int
609cifs_find_inode(struct inode *inode, void *opaque)
610{
611 struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
612
613 if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
614 return 0;
615
616 return 1;
617}
618
619static int
620cifs_init_inode(struct inode *inode, void *opaque)
621{
622 struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
623
624 CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
625 return 0;
626}
627
628/* Given fattrs, get a corresponding inode */
629struct inode *
630cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
631{
632 unsigned long hash;
633 struct inode *inode;
634
635 cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
636
637 /* hash down to 32-bits on 32-bit arch */
638 hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
639
640 inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
641
642 /* we have fattrs in hand, update the inode */
643 if (inode) {
644 cifs_fattr_to_inode(inode, fattr);
645 if (sb->s_flags & MS_NOATIME)
646 inode->i_flags |= S_NOATIME | S_NOCMTIME;
647 if (inode->i_state & I_NEW) {
648 inode->i_ino = hash;
649 unlock_new_inode(inode);
650 }
651 }
652
653 return inode;
654}
655
698/* gets root inode */ 656/* gets root inode */
699struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) 657struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
700{ 658{
701 int xid; 659 int xid;
702 struct cifs_sb_info *cifs_sb; 660 struct cifs_sb_info *cifs_sb;
703 struct inode *inode; 661 struct inode *inode = NULL;
704 long rc; 662 long rc;
705 char *full_path; 663 char *full_path;
706 664
707 inode = iget_locked(sb, ino); 665 cifs_sb = CIFS_SB(sb);
708 if (!inode)
709 return ERR_PTR(-ENOMEM);
710 if (!(inode->i_state & I_NEW))
711 return inode;
712
713 cifs_sb = CIFS_SB(inode->i_sb);
714 full_path = cifs_build_path_to_root(cifs_sb); 666 full_path = cifs_build_path_to_root(cifs_sb);
715 if (full_path == NULL) 667 if (full_path == NULL)
716 return ERR_PTR(-ENOMEM); 668 return ERR_PTR(-ENOMEM);
717 669
718 xid = GetXid(); 670 xid = GetXid();
719 if (cifs_sb->tcon->unix_ext) 671 if (cifs_sb->tcon->unix_ext)
720 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, 672 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
721 xid);
722 else 673 else
723 rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb, 674 rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
724 xid, NULL); 675 xid, NULL);
676
677 if (!inode)
678 return ERR_PTR(-ENOMEM);
679
725 if (rc && cifs_sb->tcon->ipc) { 680 if (rc && cifs_sb->tcon->ipc) {
726 cFYI(1, ("ipc connection - fake read inode")); 681 cFYI(1, ("ipc connection - fake read inode"));
727 inode->i_mode |= S_IFDIR; 682 inode->i_mode |= S_IFDIR;
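
cifs_iget() above hashes the 64-bit uniqueid down to an ino_t for iget5_locked(), so on a 32-bit arch two different files can land on the same hash; cifs_find_inode() therefore compares the full uniqueid rather than trusting the hash alone. A rough userspace analog of that lookup-or-initialize pattern (hypothetical structures, not the kernel API):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define NBUCKETS 256

struct obj {
	uint64_t uniqueid;       /* full id, like cifsInodeInfo->uniqueid */
	struct obj *next;
};

static struct obj *table[NBUCKETS];

static unsigned hash_down(uint64_t id)
{
	/* crude stand-in for the cifs_uniqueid_to_ino_t() truncation */
	return (unsigned)(id ^ (id >> 32)) % NBUCKETS;
}

static struct obj *get_obj(uint64_t id)
{
	unsigned h = hash_down(id);
	struct obj *o;

	for (o = table[h]; o; o = o->next)
		if (o->uniqueid == id)   /* full compare, not just the hash */
			return o;        /* found in the "cache" */

	o = calloc(1, sizeof(*o));       /* "I_NEW" path: initialize */
	if (!o)
		return NULL;
	o->uniqueid = id;
	o->next = table[h];
	table[h] = o;
	return o;
}

int main(void)
{
	/* two ids that collide in the truncated hash still get distinct objects */
	struct obj *a = get_obj(0x100000001ULL);
	struct obj *b = get_obj(0x200000002ULL);
	printf("%p %p same=%d\n", (void *)a, (void *)b, a == b);
	return 0;
}
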
@@ -737,7 +692,6 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
737 return ERR_PTR(rc); 692 return ERR_PTR(rc);
738 } 693 }
739 694
740 unlock_new_inode(inode);
741 695
742 kfree(full_path); 696 kfree(full_path);
743 /* can not call macro FreeXid here since in a void func 697 /* can not call macro FreeXid here since in a void func
@@ -988,8 +942,9 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
988 * sb->s_vfs_rename_mutex here */ 942 * sb->s_vfs_rename_mutex here */
989 full_path = build_path_from_dentry(dentry); 943 full_path = build_path_from_dentry(dentry);
990 if (full_path == NULL) { 944 if (full_path == NULL) {
945 rc = -ENOMEM;
991 FreeXid(xid); 946 FreeXid(xid);
992 return -ENOMEM; 947 return rc;
993 } 948 }
994 949
995 if ((tcon->ses->capabilities & CAP_UNIX) && 950 if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -1062,44 +1017,6 @@ out_reval:
1062 return rc; 1017 return rc;
1063} 1018}
1064 1019
1065void posix_fill_in_inode(struct inode *tmp_inode,
1066 FILE_UNIX_BASIC_INFO *pData, int isNewInode)
1067{
1068 struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
1069 loff_t local_size;
1070 struct timespec local_mtime;
1071
1072 cifsInfo->time = jiffies;
1073 atomic_inc(&cifsInfo->inUse);
1074
1075 /* save mtime and size */
1076 local_mtime = tmp_inode->i_mtime;
1077 local_size = tmp_inode->i_size;
1078
1079 cifs_unix_info_to_inode(tmp_inode, pData, 1);
1080 cifs_set_ops(tmp_inode, false);
1081
1082 if (!S_ISREG(tmp_inode->i_mode))
1083 return;
1084
1085 /*
1086 * No sense invalidating pages for new inode
 1087 * since we have not started caching
1088 * readahead file data yet.
1089 */
1090 if (isNewInode)
1091 return;
1092
1093 if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
1094 (local_size == tmp_inode->i_size)) {
1095 cFYI(1, ("inode exists but unchanged"));
1096 } else {
1097 /* file may have changed on server */
1098 cFYI(1, ("invalidate inode, readdir detected change"));
1099 invalidate_remote_inode(tmp_inode);
1100 }
1101}
1102
1103int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) 1020int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1104{ 1021{
1105 int rc = 0, tmprc; 1022 int rc = 0, tmprc;
@@ -1108,6 +1025,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1108 struct cifsTconInfo *pTcon; 1025 struct cifsTconInfo *pTcon;
1109 char *full_path = NULL; 1026 char *full_path = NULL;
1110 struct inode *newinode = NULL; 1027 struct inode *newinode = NULL;
1028 struct cifs_fattr fattr;
1111 1029
1112 cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode)); 1030 cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode));
1113 1031
@@ -1118,8 +1036,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1118 1036
1119 full_path = build_path_from_dentry(direntry); 1037 full_path = build_path_from_dentry(direntry);
1120 if (full_path == NULL) { 1038 if (full_path == NULL) {
1039 rc = -ENOMEM;
1121 FreeXid(xid); 1040 FreeXid(xid);
1122 return -ENOMEM; 1041 return rc;
1123 } 1042 }
1124 1043
1125 if ((pTcon->ses->capabilities & CAP_UNIX) && 1044 if ((pTcon->ses->capabilities & CAP_UNIX) &&
@@ -1146,7 +1065,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1146 cFYI(1, ("posix mkdir returned 0x%x", rc)); 1065 cFYI(1, ("posix mkdir returned 0x%x", rc));
1147 d_drop(direntry); 1066 d_drop(direntry);
1148 } else { 1067 } else {
1149 __u64 unique_id;
1150 if (pInfo->Type == cpu_to_le32(-1)) { 1068 if (pInfo->Type == cpu_to_le32(-1)) {
1151 /* no return info, go query for it */ 1069 /* no return info, go query for it */
1152 kfree(pInfo); 1070 kfree(pInfo);
@@ -1160,20 +1078,15 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1160 else 1078 else
1161 direntry->d_op = &cifs_dentry_ops; 1079 direntry->d_op = &cifs_dentry_ops;
1162 1080
1163 unique_id = le64_to_cpu(pInfo->UniqueId); 1081 cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
1164 newinode = cifs_new_inode(inode->i_sb, &unique_id); 1082 newinode = cifs_iget(inode->i_sb, &fattr);
1165 if (newinode == NULL) { 1083 if (!newinode) {
1166 kfree(pInfo); 1084 kfree(pInfo);
1167 goto mkdir_get_info; 1085 goto mkdir_get_info;
1168 } 1086 }
1169 1087
1170 newinode->i_nlink = 2;
1171 d_instantiate(direntry, newinode); 1088 d_instantiate(direntry, newinode);
1172 1089
1173 /* we already checked in POSIXCreate whether
1174 frame was long enough */
1175 posix_fill_in_inode(direntry->d_inode,
1176 pInfo, 1 /* NewInode */);
1177#ifdef CONFIG_CIFS_DEBUG2 1090#ifdef CONFIG_CIFS_DEBUG2
1178 cFYI(1, ("instantiated dentry %p %s to inode %p", 1091 cFYI(1, ("instantiated dentry %p %s to inode %p",
1179 direntry, direntry->d_name.name, newinode)); 1092 direntry, direntry->d_name.name, newinode));
@@ -1236,10 +1149,10 @@ mkdir_get_info:
1236 args.uid = NO_CHANGE_64; 1149 args.uid = NO_CHANGE_64;
1237 args.gid = NO_CHANGE_64; 1150 args.gid = NO_CHANGE_64;
1238 } 1151 }
1239 CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args, 1152 CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
1240 cifs_sb->local_nls, 1153 cifs_sb->local_nls,
1241 cifs_sb->mnt_cifs_flags & 1154 cifs_sb->mnt_cifs_flags &
1242 CIFS_MOUNT_MAP_SPECIAL_CHR); 1155 CIFS_MOUNT_MAP_SPECIAL_CHR);
1243 } else { 1156 } else {
1244 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && 1157 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
1245 (mode & S_IWUGO) == 0) { 1158 (mode & S_IWUGO) == 0) {
@@ -1303,8 +1216,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
1303 1216
1304 full_path = build_path_from_dentry(direntry); 1217 full_path = build_path_from_dentry(direntry);
1305 if (full_path == NULL) { 1218 if (full_path == NULL) {
1219 rc = -ENOMEM;
1306 FreeXid(xid); 1220 FreeXid(xid);
1307 return -ENOMEM; 1221 return rc;
1308 } 1222 }
1309 1223
1310 rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls, 1224 rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
@@ -1508,8 +1422,9 @@ int cifs_revalidate(struct dentry *direntry)
1508 since that would deadlock */ 1422 since that would deadlock */
1509 full_path = build_path_from_dentry(direntry); 1423 full_path = build_path_from_dentry(direntry);
1510 if (full_path == NULL) { 1424 if (full_path == NULL) {
1425 rc = -ENOMEM;
1511 FreeXid(xid); 1426 FreeXid(xid);
1512 return -ENOMEM; 1427 return rc;
1513 } 1428 }
1514 cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld " 1429 cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
1515 "jiffies %ld", full_path, direntry->d_inode, 1430 "jiffies %ld", full_path, direntry->d_inode,
@@ -1618,6 +1533,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1618 if (!err) { 1533 if (!err) {
1619 generic_fillattr(dentry->d_inode, stat); 1534 generic_fillattr(dentry->d_inode, stat);
1620 stat->blksize = CIFS_MAX_MSGSIZE; 1535 stat->blksize = CIFS_MAX_MSGSIZE;
1536 stat->ino = CIFS_I(dentry->d_inode)->uniqueid;
1621 } 1537 }
1622 return err; 1538 return err;
1623} 1539}
@@ -1782,6 +1698,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1782 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1698 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1783 struct cifsTconInfo *pTcon = cifs_sb->tcon; 1699 struct cifsTconInfo *pTcon = cifs_sb->tcon;
1784 struct cifs_unix_set_info_args *args = NULL; 1700 struct cifs_unix_set_info_args *args = NULL;
1701 struct cifsFileInfo *open_file;
1785 1702
1786 cFYI(1, ("setattr_unix on file %s attrs->ia_valid=0x%x", 1703 cFYI(1, ("setattr_unix on file %s attrs->ia_valid=0x%x",
1787 direntry->d_name.name, attrs->ia_valid)); 1704 direntry->d_name.name, attrs->ia_valid));
@@ -1868,10 +1785,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1868 args->ctime = NO_CHANGE_64; 1785 args->ctime = NO_CHANGE_64;
1869 1786
1870 args->device = 0; 1787 args->device = 0;
1871 rc = CIFSSMBUnixSetInfo(xid, pTcon, full_path, args, 1788 open_file = find_writable_file(cifsInode);
1872 cifs_sb->local_nls, 1789 if (open_file) {
1873 cifs_sb->mnt_cifs_flags & 1790 u16 nfid = open_file->netfid;
1874 CIFS_MOUNT_MAP_SPECIAL_CHR); 1791 u32 npid = open_file->pid;
1792 rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
1793 atomic_dec(&open_file->wrtPending);
1794 } else {
1795 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
1796 cifs_sb->local_nls,
1797 cifs_sb->mnt_cifs_flags &
1798 CIFS_MOUNT_MAP_SPECIAL_CHR);
1799 }
1875 1800
1876 if (!rc) 1801 if (!rc)
1877 rc = inode_setattr(inode, attrs); 1802 rc = inode_setattr(inode, attrs);
@@ -1911,8 +1836,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
1911 1836
1912 full_path = build_path_from_dentry(direntry); 1837 full_path = build_path_from_dentry(direntry);
1913 if (full_path == NULL) { 1838 if (full_path == NULL) {
1839 rc = -ENOMEM;
1914 FreeXid(xid); 1840 FreeXid(xid);
1915 return -ENOMEM; 1841 return rc;
1916 } 1842 }
1917 1843
1918 /* 1844 /*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cd83c53fcbb..fc1e0487eae 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -172,8 +172,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
172 full_path = build_path_from_dentry(direntry); 172 full_path = build_path_from_dentry(direntry);
173 173
174 if (full_path == NULL) { 174 if (full_path == NULL) {
175 rc = -ENOMEM;
175 FreeXid(xid); 176 FreeXid(xid);
176 return -ENOMEM; 177 return rc;
177 } 178 }
178 179
179 cFYI(1, ("Full path: %s", full_path)); 180 cFYI(1, ("Full path: %s", full_path));
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 32d6baa0a54..bd6d6895730 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -133,10 +133,12 @@ static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
133 {0, 0} 133 {0, 0}
134}; 134};
135 135
136/* Convert string containing dotted ip address to binary form */ 136/*
137/* returns 0 if invalid address */ 137 * Convert a string containing a text IPv4 or IPv6 address to binary form.
138 138 *
139int 139 * Returns 0 on failure.
140 */
141static int
140cifs_inet_pton(const int address_family, const char *cp, void *dst) 142cifs_inet_pton(const int address_family, const char *cp, void *dst)
141{ 143{
142 int ret = 0; 144 int ret = 0;
@@ -153,6 +155,52 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst)
153 return ret; 155 return ret;
154} 156}
155 157
158/*
159 * Try to convert a string to an IPv4 address and then attempt to convert
160 * it to an IPv6 address if that fails. Set the family field if either
161 * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to
162 * treat the part following it as a numeric sin6_scope_id.
163 *
164 * Returns 0 on failure.
165 */
166int
167cifs_convert_address(char *src, void *dst)
168{
169 int rc;
170 char *pct, *endp;
171 struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
172 struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
173
174 /* IPv4 address */
175 if (cifs_inet_pton(AF_INET, src, &s4->sin_addr.s_addr)) {
176 s4->sin_family = AF_INET;
177 return 1;
178 }
179
180 /* temporarily terminate string */
181 pct = strchr(src, '%');
182 if (pct)
183 *pct = '\0';
184
185 rc = cifs_inet_pton(AF_INET6, src, &s6->sin6_addr.s6_addr);
186
187 /* repair temp termination (if any) and make pct point to scopeid */
188 if (pct)
189 *pct++ = '%';
190
191 if (!rc)
192 return rc;
193
194 s6->sin6_family = AF_INET6;
195 if (pct) {
196 s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
197 if (!*pct || *endp)
198 return 0;
199 }
200
201 return rc;
202}
203
156/***************************************************************************** 204/*****************************************************************************
157convert a NT status code to a dos class/code 205convert a NT status code to a dos class/code
158 *****************************************************************************/ 206 *****************************************************************************/
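
cifs_convert_address() tries IPv4 first, then IPv6, and parses a trailing "%<n>" as a numeric sin6_scope_id, temporarily NUL-terminating the string around the conversion. A userspace approximation of the same flow, with libc's inet_pton() standing in for cifs_inet_pton():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>

static int convert_address(char *src, struct sockaddr_storage *dst)
{
	struct sockaddr_in *s4 = (struct sockaddr_in *)dst;
	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)dst;
	char *pct, *endp;
	int rc;

	if (inet_pton(AF_INET, src, &s4->sin_addr) == 1) {
		s4->sin_family = AF_INET;
		return 1;
	}

	/* temporarily terminate the string, as the patch does */
	pct = strchr(src, '%');
	if (pct)
		*pct = '\0';

	rc = inet_pton(AF_INET6, src, &s6->sin6_addr);

	/* repair the temporary termination and point at the scope id */
	if (pct)
		*pct++ = '%';
	if (rc != 1)
		return 0;

	s6->sin6_family = AF_INET6;
	if (pct) {
		s6->sin6_scope_id = (uint32_t)strtoul(pct, &endp, 0);
		if (!*pct || *endp)
			return 0;   /* empty or non-numeric scope id */
	}
	return 1;
}

int main(void)
{
	struct sockaddr_storage ss;
	char addr[] = "fe80::1%2";

	printf("ok=%d\n", convert_address(addr, &ss));
	return 0;
}
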
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 86d0055dc52..f823a4a208a 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -63,374 +63,123 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
63} 63}
64#endif /* DEBUG2 */ 64#endif /* DEBUG2 */
65 65
66/* Returns 1 if new inode created, 2 if both dentry and inode were */ 66/*
67/* Might check in the future if inode number changed so we can rehash inode */ 67 * Find the dentry that matches "name". If there isn't one, create one. If it's
68static int 68 * a negative dentry or the uniqueid changed, then drop it and recreate it.
69construct_dentry(struct qstr *qstring, struct file *file, 69 */
70 struct inode **ptmp_inode, struct dentry **pnew_dentry, 70static struct dentry *
71 __u64 *inum) 71cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
72 struct cifs_fattr *fattr)
72{ 73{
73 struct dentry *tmp_dentry = NULL; 74 struct dentry *dentry, *alias;
74 struct super_block *sb = file->f_path.dentry->d_sb; 75 struct inode *inode;
75 int rc = 0; 76 struct super_block *sb = parent->d_inode->i_sb;
77
78 cFYI(1, ("For %s", name->name));
79
80 dentry = d_lookup(parent, name);
81 if (dentry) {
82 /* FIXME: check for inode number changes? */
83 if (dentry->d_inode != NULL)
84 return dentry;
85 d_drop(dentry);
86 dput(dentry);
87 }
76 88
77 cFYI(1, ("For %s", qstring->name)); 89 dentry = d_alloc(parent, name);
78 90 if (dentry == NULL)
79 qstring->hash = full_name_hash(qstring->name, qstring->len); 91 return NULL;
80 tmp_dentry = d_lookup(file->f_path.dentry, qstring);
81 if (tmp_dentry) {
82 /* BB: overwrite old name? i.e. tmp_dentry->d_name and
83 * tmp_dentry->d_name.len??
84 */
85 cFYI(0, ("existing dentry with inode 0x%p",
86 tmp_dentry->d_inode));
87 *ptmp_inode = tmp_dentry->d_inode;
88 if (*ptmp_inode == NULL) {
89 *ptmp_inode = cifs_new_inode(sb, inum);
90 if (*ptmp_inode == NULL)
91 return rc;
92 rc = 1;
93 }
94 } else {
95 tmp_dentry = d_alloc(file->f_path.dentry, qstring);
96 if (tmp_dentry == NULL) {
97 cERROR(1, ("Failed allocating dentry"));
98 *ptmp_inode = NULL;
99 return rc;
100 }
101 92
102 if (CIFS_SB(sb)->tcon->nocase) 93 inode = cifs_iget(sb, fattr);
103 tmp_dentry->d_op = &cifs_ci_dentry_ops; 94 if (!inode) {
104 else 95 dput(dentry);
105 tmp_dentry->d_op = &cifs_dentry_ops; 96 return NULL;
97 }
106 98
107 *ptmp_inode = cifs_new_inode(sb, inum); 99 if (CIFS_SB(sb)->tcon->nocase)
108 if (*ptmp_inode == NULL) 100 dentry->d_op = &cifs_ci_dentry_ops;
109 return rc; 101 else
110 rc = 2; 102 dentry->d_op = &cifs_dentry_ops;
103
104 alias = d_materialise_unique(dentry, inode);
105 if (alias != NULL) {
106 dput(dentry);
107 if (IS_ERR(alias))
108 return NULL;
109 dentry = alias;
111 } 110 }
112 111
113 tmp_dentry->d_time = jiffies; 112 return dentry;
114 *pnew_dentry = tmp_dentry;
115 return rc;
116} 113}
117 114
118static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, 115static void
119 char *buf, unsigned int *pobject_type, int isNewInode) 116cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
120{ 117{
121 loff_t local_size; 118 fattr->cf_uid = cifs_sb->mnt_uid;
122 struct timespec local_mtime; 119 fattr->cf_gid = cifs_sb->mnt_gid;
123
124 struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
125 struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
126 __u32 attr;
127 __u64 allocation_size;
128 __u64 end_of_file;
129 umode_t default_mode;
130
131 /* save mtime and size */
132 local_mtime = tmp_inode->i_mtime;
133 local_size = tmp_inode->i_size;
134
135 if (new_buf_type) {
136 FILE_DIRECTORY_INFO *pfindData = (FILE_DIRECTORY_INFO *)buf;
137
138 attr = le32_to_cpu(pfindData->ExtFileAttributes);
139 allocation_size = le64_to_cpu(pfindData->AllocationSize);
140 end_of_file = le64_to_cpu(pfindData->EndOfFile);
141 tmp_inode->i_atime =
142 cifs_NTtimeToUnix(pfindData->LastAccessTime);
143 tmp_inode->i_mtime =
144 cifs_NTtimeToUnix(pfindData->LastWriteTime);
145 tmp_inode->i_ctime =
146 cifs_NTtimeToUnix(pfindData->ChangeTime);
147 } else { /* legacy, OS2 and DOS style */
148 int offset = cifs_sb->tcon->ses->server->timeAdj;
149 FIND_FILE_STANDARD_INFO *pfindData =
150 (FIND_FILE_STANDARD_INFO *)buf;
151
152 tmp_inode->i_mtime = cnvrtDosUnixTm(pfindData->LastWriteDate,
153 pfindData->LastWriteTime,
154 offset);
155 tmp_inode->i_atime = cnvrtDosUnixTm(pfindData->LastAccessDate,
156 pfindData->LastAccessTime,
157 offset);
158 tmp_inode->i_ctime = cnvrtDosUnixTm(pfindData->LastWriteDate,
159 pfindData->LastWriteTime,
160 offset);
161 attr = le16_to_cpu(pfindData->Attributes);
162 allocation_size = le32_to_cpu(pfindData->AllocationSize);
163 end_of_file = le32_to_cpu(pfindData->DataSize);
164 }
165 120
166 /* Linux can not store file creation time unfortunately so ignore it */ 121 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
167 122 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
168 cifsInfo->cifsAttrs = attr; 123 fattr->cf_dtype = DT_DIR;
169#ifdef CONFIG_CIFS_EXPERIMENTAL 124 } else {
170 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 125 fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
171 /* get more accurate mode via ACL - so force inode refresh */ 126 fattr->cf_dtype = DT_REG;
172 cifsInfo->time = 0;
173 } else
174#endif /* CONFIG_CIFS_EXPERIMENTAL */
175 cifsInfo->time = jiffies;
176
177 /* treat dos attribute of read-only as read-only mode bit e.g. 555? */
178 /* 2767 perms - indicate mandatory locking */
179 /* BB fill in uid and gid here? with help from winbind?
180 or retrieve from NTFS stream extended attribute */
181 if (atomic_read(&cifsInfo->inUse) == 0) {
182 tmp_inode->i_uid = cifs_sb->mnt_uid;
183 tmp_inode->i_gid = cifs_sb->mnt_gid;
184 }
185
186 if (attr & ATTR_DIRECTORY)
187 default_mode = cifs_sb->mnt_dir_mode;
188 else
189 default_mode = cifs_sb->mnt_file_mode;
190
191 /* set initial permissions */
192 if ((atomic_read(&cifsInfo->inUse) == 0) ||
193 (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
194 tmp_inode->i_mode = default_mode;
195 else {
196 /* just reenable write bits if !ATTR_READONLY */
197 if ((tmp_inode->i_mode & S_IWUGO) == 0 &&
198 (attr & ATTR_READONLY) == 0)
199 tmp_inode->i_mode |= (S_IWUGO & default_mode);
200
201 tmp_inode->i_mode &= ~S_IFMT;
202 } 127 }
203 128
204 /* clear write bits if ATTR_READONLY is set */ 129 if (fattr->cf_cifsattrs & ATTR_READONLY)
205 if (attr & ATTR_READONLY) 130 fattr->cf_mode &= ~S_IWUGO;
206 tmp_inode->i_mode &= ~S_IWUGO;
207 131
208 /* set inode type */ 132 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL &&
209 if ((attr & ATTR_SYSTEM) && 133 fattr->cf_cifsattrs & ATTR_SYSTEM) {
210 (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) { 134 if (fattr->cf_eof == 0) {
211 if (end_of_file == 0) { 135 fattr->cf_mode &= ~S_IFMT;
212 tmp_inode->i_mode |= S_IFIFO; 136 fattr->cf_mode |= S_IFIFO;
213 *pobject_type = DT_FIFO; 137 fattr->cf_dtype = DT_FIFO;
214 } else { 138 } else {
215 /* 139 /*
216 * trying to get the type can be slow, so just call 140 * trying to get the type and mode via SFU can be slow,
217 * this a regular file for now, and mark for reval 141 * so just call those regular files for now, and mark
142 * for reval
218 */ 143 */
219 tmp_inode->i_mode |= S_IFREG; 144 fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
220 *pobject_type = DT_REG;
221 cifsInfo->time = 0;
222 }
223 } else {
224 if (attr & ATTR_DIRECTORY) {
225 tmp_inode->i_mode |= S_IFDIR;
226 *pobject_type = DT_DIR;
227 } else {
228 tmp_inode->i_mode |= S_IFREG;
229 *pobject_type = DT_REG;
230 } 145 }
231 } 146 }
147}
232 148
233 /* can not fill in nlink here as in qpathinfo version and Unx search */ 149void
234 if (atomic_read(&cifsInfo->inUse) == 0) 150cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
235 atomic_set(&cifsInfo->inUse, 1); 151 struct cifs_sb_info *cifs_sb)
236 152{
237 cifsInfo->server_eof = end_of_file; 153 memset(fattr, 0, sizeof(*fattr));
238 spin_lock(&tmp_inode->i_lock); 154 fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
239 if (is_size_safe_to_change(cifsInfo, end_of_file)) { 155 fattr->cf_eof = le64_to_cpu(info->EndOfFile);
240 /* can not safely change the file size here if the 156 fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
241 client is writing to it due to potential races */ 157 fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
242 i_size_write(tmp_inode, end_of_file); 158 fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
243 159 fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
244 /* 512 bytes (2**9) is the fake blocksize that must be used */ 160
245 /* for this calculation, even though the reported blocksize is larger */ 161 cifs_fill_common_info(fattr, cifs_sb);
246 tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9;
247 }
248 spin_unlock(&tmp_inode->i_lock);
249
250 if (allocation_size < end_of_file)
251 cFYI(1, ("May be sparse file, allocation less than file size"));
252 cFYI(1, ("File Size %ld and blocks %llu",
253 (unsigned long)tmp_inode->i_size,
254 (unsigned long long)tmp_inode->i_blocks));
255 if (S_ISREG(tmp_inode->i_mode)) {
256 cFYI(1, ("File inode"));
257 tmp_inode->i_op = &cifs_file_inode_ops;
258 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
259 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
260 tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
261 else
262 tmp_inode->i_fop = &cifs_file_direct_ops;
263 } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
264 tmp_inode->i_fop = &cifs_file_nobrl_ops;
265 else
266 tmp_inode->i_fop = &cifs_file_ops;
267
268 if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
269 (cifs_sb->tcon->ses->server->maxBuf <
270 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
271 tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
272 else
273 tmp_inode->i_data.a_ops = &cifs_addr_ops;
274
275 if (isNewInode)
276 return; /* No sense invalidating pages for new inode
277 since have not started caching readahead file
278 data yet */
279
280 if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
281 (local_size == tmp_inode->i_size)) {
282 cFYI(1, ("inode exists but unchanged"));
283 } else {
284 /* file may have changed on server */
285 cFYI(1, ("invalidate inode, readdir detected change"));
286 invalidate_remote_inode(tmp_inode);
287 }
288 } else if (S_ISDIR(tmp_inode->i_mode)) {
289 cFYI(1, ("Directory inode"));
290 tmp_inode->i_op = &cifs_dir_inode_ops;
291 tmp_inode->i_fop = &cifs_dir_ops;
292 } else if (S_ISLNK(tmp_inode->i_mode)) {
293 cFYI(1, ("Symbolic Link inode"));
294 tmp_inode->i_op = &cifs_symlink_inode_ops;
295 } else {
296 cFYI(1, ("Init special inode"));
297 init_special_inode(tmp_inode, tmp_inode->i_mode,
298 tmp_inode->i_rdev);
299 }
300} 162}
301 163
302static void unix_fill_in_inode(struct inode *tmp_inode, 164void
303 FILE_UNIX_INFO *pfindData, unsigned int *pobject_type, int isNewInode) 165cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
166 struct cifs_sb_info *cifs_sb)
304{ 167{
305 loff_t local_size; 168 int offset = cifs_sb->tcon->ses->server->timeAdj;
306 struct timespec local_mtime;
307
308 struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
309 struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
310
311 __u32 type = le32_to_cpu(pfindData->Type);
312 __u64 num_of_bytes = le64_to_cpu(pfindData->NumOfBytes);
313 __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
314 cifsInfo->time = jiffies;
315 atomic_inc(&cifsInfo->inUse);
316
317 /* save mtime and size */
318 local_mtime = tmp_inode->i_mtime;
319 local_size = tmp_inode->i_size;
320
321 tmp_inode->i_atime =
322 cifs_NTtimeToUnix(pfindData->LastAccessTime);
323 tmp_inode->i_mtime =
324 cifs_NTtimeToUnix(pfindData->LastModificationTime);
325 tmp_inode->i_ctime =
326 cifs_NTtimeToUnix(pfindData->LastStatusChange);
327
328 tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions);
329 /* since we set the inode type below we need to mask off type
330 to avoid strange results if bits above were corrupt */
331 tmp_inode->i_mode &= ~S_IFMT;
332 if (type == UNIX_FILE) {
333 *pobject_type = DT_REG;
334 tmp_inode->i_mode |= S_IFREG;
335 } else if (type == UNIX_SYMLINK) {
336 *pobject_type = DT_LNK;
337 tmp_inode->i_mode |= S_IFLNK;
338 } else if (type == UNIX_DIR) {
339 *pobject_type = DT_DIR;
340 tmp_inode->i_mode |= S_IFDIR;
341 } else if (type == UNIX_CHARDEV) {
342 *pobject_type = DT_CHR;
343 tmp_inode->i_mode |= S_IFCHR;
344 tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
345 le64_to_cpu(pfindData->DevMinor) & MINORMASK);
346 } else if (type == UNIX_BLOCKDEV) {
347 *pobject_type = DT_BLK;
348 tmp_inode->i_mode |= S_IFBLK;
349 tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
350 le64_to_cpu(pfindData->DevMinor) & MINORMASK);
351 } else if (type == UNIX_FIFO) {
352 *pobject_type = DT_FIFO;
353 tmp_inode->i_mode |= S_IFIFO;
354 } else if (type == UNIX_SOCKET) {
355 *pobject_type = DT_SOCK;
356 tmp_inode->i_mode |= S_IFSOCK;
357 } else {
358 /* safest to just call it a file */
359 *pobject_type = DT_REG;
360 tmp_inode->i_mode |= S_IFREG;
361 cFYI(1, ("unknown inode type %d", type));
362 }
363 169
364 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) 170 memset(fattr, 0, sizeof(*fattr));
365 tmp_inode->i_uid = cifs_sb->mnt_uid; 171 fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate,
366 else 172 info->LastAccessTime, offset);
367 tmp_inode->i_uid = le64_to_cpu(pfindData->Uid); 173 fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate,
368 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) 174 info->LastWriteTime, offset);
369 tmp_inode->i_gid = cifs_sb->mnt_gid; 175 fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate,
370 else 176 info->LastWriteTime, offset);
371 tmp_inode->i_gid = le64_to_cpu(pfindData->Gid);
372 tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks);
373
374 cifsInfo->server_eof = end_of_file;
375 spin_lock(&tmp_inode->i_lock);
376 if (is_size_safe_to_change(cifsInfo, end_of_file)) {
377 /* can not safely change the file size here if the
378 client is writing to it due to potential races */
379 i_size_write(tmp_inode, end_of_file);
380
381 /* 512 bytes (2**9) is the fake blocksize that must be used */
382 /* for this calculation, not the real blocksize */
383 tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
384 }
385 spin_unlock(&tmp_inode->i_lock);
386 177
387 if (S_ISREG(tmp_inode->i_mode)) { 178 fattr->cf_cifsattrs = le16_to_cpu(info->Attributes);
388 cFYI(1, ("File inode")); 179 fattr->cf_bytes = le32_to_cpu(info->AllocationSize);
389 tmp_inode->i_op = &cifs_file_inode_ops; 180 fattr->cf_eof = le32_to_cpu(info->DataSize);
390 181
391 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { 182 cifs_fill_common_info(fattr, cifs_sb);
392 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
393 tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
394 else
395 tmp_inode->i_fop = &cifs_file_direct_ops;
396 } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
397 tmp_inode->i_fop = &cifs_file_nobrl_ops;
398 else
399 tmp_inode->i_fop = &cifs_file_ops;
400
401 if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
402 (cifs_sb->tcon->ses->server->maxBuf <
403 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
404 tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
405 else
406 tmp_inode->i_data.a_ops = &cifs_addr_ops;
407
408 if (isNewInode)
409 return; /* No sense invalidating pages for new inode
410 since we have not started caching readahead
411 file data for it yet */
412
413 if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
414 (local_size == tmp_inode->i_size)) {
415 cFYI(1, ("inode exists but unchanged"));
416 } else {
417 /* file may have changed on server */
418 cFYI(1, ("invalidate inode, readdir detected change"));
419 invalidate_remote_inode(tmp_inode);
420 }
421 } else if (S_ISDIR(tmp_inode->i_mode)) {
422 cFYI(1, ("Directory inode"));
423 tmp_inode->i_op = &cifs_dir_inode_ops;
424 tmp_inode->i_fop = &cifs_dir_ops;
425 } else if (S_ISLNK(tmp_inode->i_mode)) {
426 cFYI(1, ("Symbolic Link inode"));
427 tmp_inode->i_op = &cifs_symlink_inode_ops;
428/* tmp_inode->i_fop = *//* do not need to set to anything */
429 } else {
430 cFYI(1, ("Special inode"));
431 init_special_inode(tmp_inode, tmp_inode->i_mode,
432 tmp_inode->i_rdev);
433 }
434} 183}
435 184
436/* BB eventually need to add the following helper function to 185/* BB eventually need to add the following helper function to
@@ -872,7 +621,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
872 len = strnlen(filename, PATH_MAX); 621 len = strnlen(filename, PATH_MAX);
873 } 622 }
874 623
875 *pinum = le64_to_cpu(pFindData->UniqueId); 624 *pinum = le64_to_cpu(pFindData->basic.UniqueId);
876 } else if (level == SMB_FIND_FILE_DIRECTORY_INFO) { 625 } else if (level == SMB_FIND_FILE_DIRECTORY_INFO) {
877 FILE_DIRECTORY_INFO *pFindData = 626 FILE_DIRECTORY_INFO *pFindData =
878 (FILE_DIRECTORY_INFO *)current_entry; 627 (FILE_DIRECTORY_INFO *)current_entry;
@@ -932,11 +681,12 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
932 int rc = 0; 681 int rc = 0;
933 struct qstr qstring; 682 struct qstr qstring;
934 struct cifsFileInfo *pCifsF; 683 struct cifsFileInfo *pCifsF;
935 unsigned int obj_type; 684 u64 inum;
936 __u64 inum; 685 ino_t ino;
686 struct super_block *sb;
937 struct cifs_sb_info *cifs_sb; 687 struct cifs_sb_info *cifs_sb;
938 struct inode *tmp_inode;
939 struct dentry *tmp_dentry; 688 struct dentry *tmp_dentry;
689 struct cifs_fattr fattr;
940 690
941 /* get filename and len into qstring */ 691 /* get filename and len into qstring */
942 /* get dentry */ 692 /* get dentry */
@@ -954,60 +704,53 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
954 if (rc != 0) 704 if (rc != 0)
955 return 0; 705 return 0;
956 706
957 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 707 sb = file->f_path.dentry->d_sb;
708 cifs_sb = CIFS_SB(sb);
958 709
959 qstring.name = scratch_buf; 710 qstring.name = scratch_buf;
960 rc = cifs_get_name_from_search_buf(&qstring, pfindEntry, 711 rc = cifs_get_name_from_search_buf(&qstring, pfindEntry,
961 pCifsF->srch_inf.info_level, 712 pCifsF->srch_inf.info_level,
962 pCifsF->srch_inf.unicode, cifs_sb, 713 pCifsF->srch_inf.unicode, cifs_sb,
963 max_len, 714 max_len, &inum /* returned */);
964 &inum /* returned */);
965 715
966 if (rc) 716 if (rc)
967 return rc; 717 return rc;
968 718
969 /* only these two infolevels return valid inode numbers */
970 if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX ||
971 pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO)
972 rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry,
973 &inum);
974 else
975 rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry,
976 NULL);
977
978 if ((tmp_inode == NULL) || (tmp_dentry == NULL))
979 return -ENOMEM;
980
981 /* we pass in rc below, indicating whether it is a new inode,
982 so we can figure out whether to invalidate the inode cached
983 data if the file has changed */
984 if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) 719 if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
985 unix_fill_in_inode(tmp_inode, 720 cifs_unix_basic_to_fattr(&fattr,
986 (FILE_UNIX_INFO *)pfindEntry, 721 &((FILE_UNIX_INFO *) pfindEntry)->basic,
987 &obj_type, rc); 722 cifs_sb);
988 else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) 723 else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
989 fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */, 724 cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *)
990 pfindEntry, &obj_type, rc); 725 pfindEntry, cifs_sb);
991 else 726 else
992 fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc); 727 cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)
728 pfindEntry, cifs_sb);
993 729
994 if (rc) /* new inode - needs to be tied to dentry */ { 730 /* FIXME: make _to_fattr functions fill this out */
995 d_instantiate(tmp_dentry, tmp_inode); 731 if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO)
996 if (rc == 2) 732 fattr.cf_uniqueid = inum;
997 d_rehash(tmp_dentry); 733 else
998 } 734 fattr.cf_uniqueid = iunique(sb, ROOT_I);
999 735
736 ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
737 tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr);
1000 738
1001 rc = filldir(direntry, qstring.name, qstring.len, file->f_pos, 739 rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
1002 tmp_inode->i_ino, obj_type); 740 ino, fattr.cf_dtype);
741
742 /*
743 * we can not return filldir errors to the caller since they are
744 * "normal" when the stat blocksize is too small - we return remapped
745 * error instead
746 *
747 * FIXME: This looks bogus. filldir returns -EOVERFLOW in the above
748 * case already. Why should we be clobbering other errors from it?
749 */
1003 if (rc) { 750 if (rc) {
1004 cFYI(1, ("filldir rc = %d", rc)); 751 cFYI(1, ("filldir rc = %d", rc));
1005 /* we can not return filldir errors to the caller
1006 since they are "normal" when the stat blocksize
1007 is too small - we return remapped error instead */
1008 rc = -EOVERFLOW; 752 rc = -EOVERFLOW;
1009 } 753 }
1010
1011 dput(tmp_dentry); 754 dput(tmp_dentry);
1012 return rc; 755 return rc;
1013} 756}
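
The mode derivation in cifs_fill_common_info() is worth seeing in isolation: directories and regular files take the mount's default modes, and ATTR_READONLY strips the write bits. A small standalone demonstration — the attribute values follow the SMB FILE_ATTRIBUTE_* constants, and the mount modes here are illustrative:

#include <stdio.h>
#include <sys/stat.h>

#define ATTR_READONLY  0x0001
#define ATTR_DIRECTORY 0x0010

static mode_t attrs_to_mode(unsigned attrs, mode_t dir_mode, mode_t file_mode)
{
	mode_t mode;

	if (attrs & ATTR_DIRECTORY)
		mode = S_IFDIR | dir_mode;
	else
		mode = S_IFREG | file_mode;

	/* clear write bits if ATTR_READONLY is set (~S_IWUGO) */
	if (attrs & ATTR_READONLY)
		mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
	return mode;
}

int main(void)
{
	/* a read-only regular file: prints 100444 */
	printf("%o\n", (unsigned)attrs_to_mode(ATTR_READONLY, 0755, 0644));
	return 0;
}
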
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 897a052270f..7085a6275c4 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -802,7 +802,7 @@ ssetup_ntlmssp_authenticate:
802#endif /* CONFIG_CIFS_UPCALL */ 802#endif /* CONFIG_CIFS_UPCALL */
803 } else { 803 } else {
804#ifdef CONFIG_CIFS_EXPERIMENTAL 804#ifdef CONFIG_CIFS_EXPERIMENTAL
805 if ((experimEnabled > 1) && (type == RawNTLMSSP)) { 805 if (type == RawNTLMSSP) {
806 if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { 806 if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
807 cERROR(1, ("NTLMSSP requires Unicode support")); 807 cERROR(1, ("NTLMSSP requires Unicode support"));
808 rc = -ENOSYS; 808 rc = -ENOSYS;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index e9527eedc63..a75afa3dd9e 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -64,8 +64,9 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
64 64
65 full_path = build_path_from_dentry(direntry); 65 full_path = build_path_from_dentry(direntry);
66 if (full_path == NULL) { 66 if (full_path == NULL) {
67 rc = -ENOMEM;
67 FreeXid(xid); 68 FreeXid(xid);
68 return -ENOMEM; 69 return rc;
69 } 70 }
70 if (ea_name == NULL) { 71 if (ea_name == NULL) {
71 cFYI(1, ("Null xattr names not supported")); 72 cFYI(1, ("Null xattr names not supported"));
@@ -118,8 +119,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
118 119
119 full_path = build_path_from_dentry(direntry); 120 full_path = build_path_from_dentry(direntry);
120 if (full_path == NULL) { 121 if (full_path == NULL) {
122 rc = -ENOMEM;
121 FreeXid(xid); 123 FreeXid(xid);
122 return -ENOMEM; 124 return rc;
123 } 125 }
124 /* return dos attributes as pseudo xattr */ 126 /* return dos attributes as pseudo xattr */
125 /* return alt name if available as pseudo attr */ 127 /* return alt name if available as pseudo attr */
@@ -225,8 +227,9 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
225 227
226 full_path = build_path_from_dentry(direntry); 228 full_path = build_path_from_dentry(direntry);
227 if (full_path == NULL) { 229 if (full_path == NULL) {
230 rc = -ENOMEM;
228 FreeXid(xid); 231 FreeXid(xid);
229 return -ENOMEM; 232 return rc;
230 } 233 }
231 /* return dos attributes as pseudo xattr */ 234 /* return dos attributes as pseudo xattr */
232 /* return alt name if available as pseudo attr */ 235 /* return alt name if available as pseudo attr */
@@ -351,8 +354,9 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
351 354
352 full_path = build_path_from_dentry(direntry); 355 full_path = build_path_from_dentry(direntry);
353 if (full_path == NULL) { 356 if (full_path == NULL) {
357 rc = -ENOMEM;
354 FreeXid(xid); 358 FreeXid(xid);
355 return -ENOMEM; 359 return rc;
356 } 360 }
357 /* return dos attributes as pseudo xattr */ 361 /* return dos attributes as pseudo xattr */
358 /* return alt name if available as pseudo attr */ 362 /* return alt name if available as pseudo attr */
diff --git a/fs/compat.c b/fs/compat.c
index cdd51a3a7c5..94502dab972 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -32,7 +32,6 @@
32#include <linux/smb_mount.h> 32#include <linux/smb_mount.h>
33#include <linux/ncp_mount.h> 33#include <linux/ncp_mount.h>
34#include <linux/nfs4_mount.h> 34#include <linux/nfs4_mount.h>
35#include <linux/smp_lock.h>
36#include <linux/syscalls.h> 35#include <linux/syscalls.h>
37#include <linux/ctype.h> 36#include <linux/ctype.h>
38#include <linux/module.h> 37#include <linux/module.h>
@@ -1486,8 +1485,8 @@ int compat_do_execve(char * filename,
1486 if (!bprm) 1485 if (!bprm)
1487 goto out_files; 1486 goto out_files;
1488 1487
1489 retval = mutex_lock_interruptible(&current->cred_guard_mutex); 1488 retval = -ERESTARTNOINTR;
1490 if (retval < 0) 1489 if (mutex_lock_interruptible(&current->cred_guard_mutex))
1491 goto out_free; 1490 goto out_free;
1492 current->in_execve = 1; 1491 current->in_execve = 1;
1493 1492
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 626c7483b4d..f91fd51b32e 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -19,6 +19,7 @@
19#include <linux/compiler.h> 19#include <linux/compiler.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/smp_lock.h>
22#include <linux/ioctl.h> 23#include <linux/ioctl.h>
23#include <linux/if.h> 24#include <linux/if.h>
24#include <linux/if_bridge.h> 25#include <linux/if_bridge.h>
@@ -1904,6 +1905,7 @@ COMPATIBLE_IOCTL(FIONCLEX)
1904COMPATIBLE_IOCTL(FIOASYNC) 1905COMPATIBLE_IOCTL(FIOASYNC)
1905COMPATIBLE_IOCTL(FIONBIO) 1906COMPATIBLE_IOCTL(FIONBIO)
1906COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */ 1907COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
1908COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
1907/* 0x00 */ 1909/* 0x00 */
1908COMPATIBLE_IOCTL(FIBMAP) 1910COMPATIBLE_IOCTL(FIBMAP)
1909COMPATIBLE_IOCTL(FIGETBSZ) 1911COMPATIBLE_IOCTL(FIGETBSZ)
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 205ec95b347..eb507c453c5 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -435,7 +435,7 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
435static int find_rsb(struct dlm_ls *ls, char *name, int namelen, 435static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
436 unsigned int flags, struct dlm_rsb **r_ret) 436 unsigned int flags, struct dlm_rsb **r_ret)
437{ 437{
438 struct dlm_rsb *r, *tmp; 438 struct dlm_rsb *r = NULL, *tmp;
439 uint32_t hash, bucket; 439 uint32_t hash, bucket;
440 int error = -EINVAL; 440 int error = -EINVAL;
441 441
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index cdb580a9c7a..618a60f0388 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -902,7 +902,7 @@ static void tcp_connect_to_sock(struct connection *con)
902 int result = -EHOSTUNREACH; 902 int result = -EHOSTUNREACH;
903 struct sockaddr_storage saddr, src_addr; 903 struct sockaddr_storage saddr, src_addr;
904 int addr_len; 904 int addr_len;
905 struct socket *sock; 905 struct socket *sock = NULL;
906 906
907 if (con->nodeid == 0) { 907 if (con->nodeid == 0) {
908 log_print("attempt to connect sock 0 foiled"); 908 log_print("attempt to connect sock 0 foiled");
@@ -962,6 +962,8 @@ out_err:
962 if (con->sock) { 962 if (con->sock) {
963 sock_release(con->sock); 963 sock_release(con->sock);
964 con->sock = NULL; 964 con->sock = NULL;
965 } else if (sock) {
966 sock_release(sock);
965 } 967 }
966 /* 968 /*
967 * Some errors are fatal and this list might need adjusting. For other 969 * Some errors are fatal and this list might need adjusting. For other
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 894a32d438d..16f682e26c0 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -353,7 +353,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
353{ 353{
354 struct dlm_plock_info info; 354 struct dlm_plock_info info;
355 struct plock_op *op; 355 struct plock_op *op;
356 int found = 0; 356 int found = 0, do_callback = 0;
357 357
358 if (count != sizeof(info)) 358 if (count != sizeof(info))
359 return -EINVAL; 359 return -EINVAL;
@@ -366,21 +366,24 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
366 366
367 spin_lock(&ops_lock); 367 spin_lock(&ops_lock);
368 list_for_each_entry(op, &recv_list, list) { 368 list_for_each_entry(op, &recv_list, list) {
369 if (op->info.fsid == info.fsid && op->info.number == info.number && 369 if (op->info.fsid == info.fsid &&
370 op->info.number == info.number &&
370 op->info.owner == info.owner) { 371 op->info.owner == info.owner) {
372 struct plock_xop *xop = (struct plock_xop *)op;
371 list_del_init(&op->list); 373 list_del_init(&op->list);
372 found = 1;
373 op->done = 1;
374 memcpy(&op->info, &info, sizeof(info)); 374 memcpy(&op->info, &info, sizeof(info));
375 if (xop->callback)
376 do_callback = 1;
377 else
378 op->done = 1;
379 found = 1;
375 break; 380 break;
376 } 381 }
377 } 382 }
378 spin_unlock(&ops_lock); 383 spin_unlock(&ops_lock);
379 384
380 if (found) { 385 if (found) {
381 struct plock_xop *xop; 386 if (do_callback)
382 xop = (struct plock_xop *)op;
383 if (xop->callback)
384 dlm_plock_callback(op); 387 dlm_plock_callback(op);
385 else 388 else
386 wake_up(&recv_wq); 389 wake_up(&recv_wq);
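
The plock rework illustrates a common locking refactor: decide what to do (callback vs. wakeup) while holding ops_lock, record the decision in a local variable, and perform the action only after the lock is dropped, so dlm_plock_callback() never runs under the spinlock. A compact pthreads rendition of the same shape (illustrative types; the kernel side uses a spinlock and a waitqueue):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

struct op {
	int has_callback;
	int done;
};

static void handle(struct op *op)
{
	int do_callback = 0;

	pthread_mutex_lock(&ops_lock);
	if (op->has_callback)
		do_callback = 1;       /* decide under the lock */
	else
		op->done = 1;
	pthread_mutex_unlock(&ops_lock);

	if (do_callback)
		printf("callback\n");  /* act only after unlocking */
	else
		printf("wake waiter\n");
}

int main(void)
{
	struct op a = { .has_callback = 1 }, b = { 0 };

	handle(&a);
	handle(&b);
	return 0;
}
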
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index af737bb56cb..259525c9abb 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1303,6 +1303,13 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
1303 } 1303 }
1304 (*new_auth_tok)->session_key.encrypted_key_size = 1304 (*new_auth_tok)->session_key.encrypted_key_size =
1305 (body_size - (ECRYPTFS_SALT_SIZE + 5)); 1305 (body_size - (ECRYPTFS_SALT_SIZE + 5));
1306 if ((*new_auth_tok)->session_key.encrypted_key_size
1307 > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
1308 printk(KERN_WARNING "Tag 3 packet contains key larger "
1309 "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
1310 rc = -EINVAL;
1311 goto out_free;
1312 }
1306 if (unlikely(data[(*packet_size)++] != 0x04)) { 1313 if (unlikely(data[(*packet_size)++] != 0x04)) {
1307 printk(KERN_WARNING "Unknown version number [%d]\n", 1314 printk(KERN_WARNING "Unknown version number [%d]\n",
1308 data[(*packet_size) - 1]); 1315 data[(*packet_size) - 1]);
@@ -1449,6 +1456,12 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents,
1449 rc = -EINVAL; 1456 rc = -EINVAL;
1450 goto out; 1457 goto out;
1451 } 1458 }
1459 if (unlikely((*tag_11_contents_size) > max_contents_bytes)) {
1460 printk(KERN_ERR "Literal data section in tag 11 packet exceeds "
1461 "expected size\n");
1462 rc = -EINVAL;
1463 goto out;
1464 }
1452 if (data[(*packet_size)++] != 0x62) { 1465 if (data[(*packet_size)++] != 0x62) {
1453 printk(KERN_WARNING "Unrecognizable packet\n"); 1466 printk(KERN_WARNING "Unrecognizable packet\n");
1454 rc = -EINVAL; 1467 rc = -EINVAL;
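
Both eCryptfs hunks apply the same defensive rule: validate a length field taken from the packet against the fixed destination capacity before it is ever used in a copy. A generic sketch of that pattern (the cap and the helper name are illustrative, not the eCryptfs constants):

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define MAX_KEY_BYTES 64   /* illustrative cap, not ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES */

static int copy_key(unsigned char *dst, const unsigned char *src,
		    size_t claimed_size)
{
	if (claimed_size > MAX_KEY_BYTES) {
		fprintf(stderr, "packet claims key larger than cap\n");
		return -EINVAL;          /* reject before touching dst */
	}
	memcpy(dst, src, claimed_size);
	return 0;
}

int main(void)
{
	unsigned char key[MAX_KEY_BYTES], blob[128] = {0};

	printf("rc=%d\n", copy_key(key, blob, 128)); /* rejected */
	printf("rc=%d\n", copy_key(key, blob, 32));  /* accepted */
	return 0;
}
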
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 3f0e1974abd..31d12de83a2 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -14,35 +14,44 @@
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/anon_inodes.h> 16#include <linux/anon_inodes.h>
17#include <linux/eventfd.h>
18#include <linux/syscalls.h> 17#include <linux/syscalls.h>
19#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kref.h>
20#include <linux/eventfd.h>
20 21
21struct eventfd_ctx { 22struct eventfd_ctx {
23 struct kref kref;
22 wait_queue_head_t wqh; 24 wait_queue_head_t wqh;
23 /* 25 /*
24 * Every time that a write(2) is performed on an eventfd, the 26 * Every time that a write(2) is performed on an eventfd, the
25 * value of the __u64 being written is added to "count" and a 27 * value of the __u64 being written is added to "count" and a
26 * wakeup is performed on "wqh". A read(2) will return the "count" 28 * wakeup is performed on "wqh". A read(2) will return the "count"
27 * value to userspace, and will reset "count" to zero. The kernel 29 * value to userspace, and will reset "count" to zero. The kernel
 28 * size eventfd_signal() also, adds to the "count" counter and 30 * side eventfd_signal() also adds to the "count" counter and
 29 * issue a wakeup. 31 * issues a wakeup.
30 */ 32 */
31 __u64 count; 33 __u64 count;
32 unsigned int flags; 34 unsigned int flags;
33}; 35};
34 36
35/* 37/**
36 * Adds "n" to the eventfd counter "count". Returns "n" in case of 38 * eventfd_signal - Adds @n to the eventfd counter.
 37 * success, or a value lower than "n" in case of counter overflow. 39 * @ctx: [in] Pointer to the eventfd context.
38 * This function is supposed to be called by the kernel in paths 40 * @n: [in] Value of the counter to be added to the eventfd internal counter.
39 * that do not allow sleeping. In this function we allow the counter 41 * The value cannot be negative.
40 * to reach the ULLONG_MAX value, and we signal this as overflow 42 *
41 * condition by returining a POLLERR to poll(2). 43 * This function is supposed to be called by the kernel in paths that do not
44 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
45 * value, and we signal this as overflow condition by returining a POLLERR
46 * to poll(2).
47 *
48 * Returns @n in case of success, a non-negative number lower than @n in case
49 * of overflow, or the following error codes:
50 *
51 * -EINVAL : The value of @n is negative.
42 */ 52 */
43int eventfd_signal(struct file *file, int n) 53int eventfd_signal(struct eventfd_ctx *ctx, int n)
44{ 54{
45 struct eventfd_ctx *ctx = file->private_data;
46 unsigned long flags; 55 unsigned long flags;
47 56
48 if (n < 0) 57 if (n < 0)
@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n)
59} 68}
60EXPORT_SYMBOL_GPL(eventfd_signal); 69EXPORT_SYMBOL_GPL(eventfd_signal);
61 70
71static void eventfd_free(struct kref *kref)
72{
73 struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
74
75 kfree(ctx);
76}
77
78/**
79 * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
80 * @ctx: [in] Pointer to the eventfd context.
81 *
82 * Returns: In case of success, returns a pointer to the eventfd context.
83 */
84struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
85{
86 kref_get(&ctx->kref);
87 return ctx;
88}
89EXPORT_SYMBOL_GPL(eventfd_ctx_get);
90
91/**
92 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
93 * @ctx: [in] Pointer to eventfd context.
94 *
95 * The eventfd context reference must have been previously acquired either
 96 * with eventfd_ctx_get() or eventfd_ctx_fdget().
97 */
98void eventfd_ctx_put(struct eventfd_ctx *ctx)
99{
100 kref_put(&ctx->kref, eventfd_free);
101}
102EXPORT_SYMBOL_GPL(eventfd_ctx_put);
103
62static int eventfd_release(struct inode *inode, struct file *file) 104static int eventfd_release(struct inode *inode, struct file *file)
63{ 105{
64 kfree(file->private_data); 106 struct eventfd_ctx *ctx = file->private_data;
107
108 wake_up_poll(&ctx->wqh, POLLHUP);
109 eventfd_ctx_put(ctx);
65 return 0; 110 return 0;
66} 111}
67 112
@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = {
185 .write = eventfd_write, 230 .write = eventfd_write,
186}; 231};
187 232
233/**
234 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
235 * @fd: [in] Eventfd file descriptor.
236 *
237 * Returns a pointer to the eventfd file structure in case of success, or the
238 * following error pointer:
239 *
240 * -EBADF : Invalid @fd file descriptor.
241 * -EINVAL : The @fd file descriptor is not an eventfd file.
242 */
188struct file *eventfd_fget(int fd) 243struct file *eventfd_fget(int fd)
189{ 244{
190 struct file *file; 245 struct file *file;
@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd)
201} 256}
202EXPORT_SYMBOL_GPL(eventfd_fget); 257EXPORT_SYMBOL_GPL(eventfd_fget);
203 258
259/**
260 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
261 * @fd: [in] Eventfd file descriptor.
262 *
263 * Returns a pointer to the internal eventfd context, otherwise the error
264 * pointers returned by the following functions:
265 *
266 * eventfd_fget
267 */
268struct eventfd_ctx *eventfd_ctx_fdget(int fd)
269{
270 struct file *file;
271 struct eventfd_ctx *ctx;
272
273 file = eventfd_fget(fd);
274 if (IS_ERR(file))
275 return (struct eventfd_ctx *) file;
276 ctx = eventfd_ctx_get(file->private_data);
277 fput(file);
278
279 return ctx;
280}
281EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
282
283/**
284 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
285 * @file: [in] Eventfd file pointer.
286 *
287 * Returns a pointer to the internal eventfd context, otherwise the error
288 * pointer:
289 *
290 * -EINVAL : The @file pointer is not an eventfd file.
291 */
292struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
293{
294 if (file->f_op != &eventfd_fops)
295 return ERR_PTR(-EINVAL);
296
297 return eventfd_ctx_get(file->private_data);
298}
299EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
300
204SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) 301SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
205{ 302{
206 int fd; 303 int fd;
@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
217 if (!ctx) 314 if (!ctx)
218 return -ENOMEM; 315 return -ENOMEM;
219 316
317 kref_init(&ctx->kref);
220 init_waitqueue_head(&ctx->wqh); 318 init_waitqueue_head(&ctx->wqh);
221 ctx->count = count; 319 ctx->count = count;
222 ctx->flags = flags; 320 ctx->flags = flags;
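The eventfd hunks above decouple the context's lifetime from its struct file: the context is now kref-counted, release wakes pollers with POLLHUP and merely drops a reference, and kernel consumers can pin the context itself across a userspace close. A minimal consumer sketch against the API as added above (my_consumer and its fields are hypothetical, not part of the patch):

#include <linux/eventfd.h>
#include <linux/err.h>

struct my_consumer {
	struct eventfd_ctx *trigger;	/* may outlive the userspace fd */
};

static int my_consumer_bind(struct my_consumer *c, int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* -EBADF/-EINVAL from eventfd_fget() */
	c->trigger = ctx;		/* the kref pins the context, not the file */
	return 0;
}

static void my_consumer_fire(struct my_consumer *c)
{
	eventfd_signal(c->trigger, 1);	/* safe even after the fd is closed */
}

static void my_consumer_unbind(struct my_consumer *c)
{
	eventfd_ctx_put(c->trigger);	/* drops the reference taken at bind time */
	c->trigger = NULL;
}

Note that eventfd_ctx_fdget() itself does an fput() before returning, so the consumer holds only the context reference, never the file.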
diff --git a/fs/exec.c b/fs/exec.c
index e639957d7a5..4a8849e45b2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1277,8 +1277,8 @@ int do_execve(char * filename,
1277 if (!bprm) 1277 if (!bprm)
1278 goto out_files; 1278 goto out_files;
1279 1279
1280 retval = mutex_lock_interruptible(&current->cred_guard_mutex); 1280 retval = -ERESTARTNOINTR;
1281 if (retval < 0) 1281 if (mutex_lock_interruptible(&current->cred_guard_mutex))
1282 goto out_free; 1282 goto out_free;
1283 current->in_execve = 1; 1283 current->in_execve = 1;
1284 1284
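The point of this hunk is the error code rather than the locking: mutex_lock_interruptible() returns -EINTR, which do_execve() now translates to -ERESTARTNOINTR, so a signal arriving while waiting on cred_guard_mutex transparently restarts the execve() instead of leaking a spurious -EINTR to userspace. The pattern in isolation (a sketch; some_mutex is illustrative):

	retval = -ERESTARTNOINTR;	/* handled by the signal code, never seen by userspace */
	if (mutex_lock_interruptible(&some_mutex))
		goto out_free;		/* interrupted: the syscall is restarted */
	retval = 0;			/* lock taken, proceed */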
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index 24667eedc02..c6718e4817f 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -2,9 +2,7 @@
2 * common.h - Common definitions for both Kernel and user-mode utilities 2 * common.h - Common definitions for both Kernel and user-mode utilities
3 * 3 *
4 * Copyright (C) 2005, 2006 4 * Copyright (C) 2005, 2006
5 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 5 * Avishay Traeger (avishay@gmail.com)
6 * Copyright (C) 2005, 2006
7 * International Business Machines
8 * Copyright (C) 2008, 2009 6 * Copyright (C) 2008, 2009
9 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <bharrosh@panasas.com>
10 * 8 *
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 65b0c8c776a..4cfab1cc75c 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 0fd4c785967..5ec72e020b2 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
@@ -156,6 +154,9 @@ ino_t exofs_parent_ino(struct dentry *child);
156int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *, 154int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
157 struct inode *); 155 struct inode *);
158 156
157/* super.c */
158int exofs_sync_fs(struct super_block *sb, int wait);
159
159/********************* 160/*********************
160 * operation vectors * 161 * operation vectors *
161 *********************/ 162 *********************/
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 6ed7fe48475..839b9dc1e70 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
@@ -47,16 +45,23 @@ static int exofs_file_fsync(struct file *filp, struct dentry *dentry,
47{ 45{
48 int ret; 46 int ret;
49 struct address_space *mapping = filp->f_mapping; 47 struct address_space *mapping = filp->f_mapping;
48 struct inode *inode = dentry->d_inode;
49 struct super_block *sb;
50 50
51 ret = filemap_write_and_wait(mapping); 51 ret = filemap_write_and_wait(mapping);
52 if (ret) 52 if (ret)
53 return ret; 53 return ret;
54 54
55 /*Note: file_fsync below also calles sync_blockdev, which is a no-op 55 /* sync the inode attributes */
56 * for exofs, but other then that it does sync_inode and 56 ret = write_inode_now(inode, 1);
57 * sync_superblock which is what we need here. 57
58 */ 58 /* This is a good place to write the sb */
59 return file_fsync(filp, dentry, datasync); 59 /* TODO: Sechedule an sb-sync on create */
60 sb = inode->i_sb;
61 if (sb->s_dirt)
62 exofs_sync_fs(sb, 1);
63
64 return ret;
60} 65}
61 66
62static int exofs_flush(struct file *file, fl_owner_t id) 67static int exofs_flush(struct file *file, fl_owner_t id)
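The open-coded replacement for file_fsync() above keeps only what exofs needs (file_fsync() also called sync_blockdev(), a no-op for an OSD-backed filesystem). The ordering it implements, as a consolidated sketch of the function after this patch:

static int exofs_file_fsync(struct file *filp, struct dentry *dentry,
			    int datasync)
{
	struct inode *inode = dentry->d_inode;
	int ret;

	ret = filemap_write_and_wait(filp->f_mapping);	/* 1: data pages */
	if (ret)
		return ret;
	ret = write_inode_now(inode, 1);	/* 2: inode attributes, synchronous */
	if (inode->i_sb->s_dirt)		/* 3: superblock, only when dirty */
		exofs_sync_fs(inode->i_sb, 1);	/* exported via exofs.h above */
	return ret;
}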
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 77d0a295eb1..6c10f747669 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
@@ -295,6 +293,9 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
295err: 293err:
296 if (!is_sync) 294 if (!is_sync)
297 _unlock_pcol_pages(pcol, ret, READ); 295 _unlock_pcol_pages(pcol, ret, READ);
296 else /* Pages unlocked by caller in sync mode only free bio */
297 pcol_free(pcol);
298
298 kfree(pcol_copy); 299 kfree(pcol_copy);
299 if (or) 300 if (or)
300 osd_end_request(or); 301 osd_end_request(or);
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 77fdd765e76..b7dd0c23686 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index b3d2ccb87aa..4372542df28 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 8216c5b77b5..5ab10c3bbeb 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
@@ -33,6 +31,7 @@
33 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 31 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
34 */ 32 */
35 33
34#include <linux/smp_lock.h>
36#include <linux/string.h> 35#include <linux/string.h>
37#include <linux/parser.h> 36#include <linux/parser.h>
38#include <linux/vfs.h> 37#include <linux/vfs.h>
@@ -200,7 +199,7 @@ static const struct export_operations exofs_export_ops;
200/* 199/*
201 * Write the superblock to the OSD 200 * Write the superblock to the OSD
202 */ 201 */
203static int exofs_sync_fs(struct super_block *sb, int wait) 202int exofs_sync_fs(struct super_block *sb, int wait)
204{ 203{
205 struct exofs_sb_info *sbi; 204 struct exofs_sb_info *sbi;
206 struct exofs_fscb *fscb; 205 struct exofs_fscb *fscb;
diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c
index 36e2d7bc7f7..4dd687c3e74 100644
--- a/fs/exofs/symlink.c
+++ b/fs/exofs/symlink.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2005, 2006
5 * International Business Machines
6 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <bharrosh@panasas.com>
8 * 6 *
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 7cb4badef92..e7431309bdc 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -13,7 +13,6 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/compat.h> 14#include <linux/compat.h>
15#include <linux/mount.h> 15#include <linux/mount.h>
16#include <linux/smp_lock.h>
17#include <asm/current.h> 16#include <asm/current.h>
18#include <asm/uaccess.h> 17#include <asm/uaccess.h>
19 18
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 6524ecaebb7..e1dedb0f787 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -66,8 +66,16 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
66 inode = NULL; 66 inode = NULL;
67 if (ino) { 67 if (ino) {
68 inode = ext2_iget(dir->i_sb, ino); 68 inode = ext2_iget(dir->i_sb, ino);
69 if (IS_ERR(inode)) 69 if (unlikely(IS_ERR(inode))) {
70 return ERR_CAST(inode); 70 if (PTR_ERR(inode) == -ESTALE) {
71 ext2_error(dir->i_sb, __func__,
72 "deleted inode referenced: %lu",
73 ino);
74 return ERR_PTR(-EIO);
75 } else {
76 return ERR_CAST(inode);
77 }
78 }
71 } 79 }
72 return d_splice_alias(inode, dentry); 80 return d_splice_alias(inode, dentry);
73} 81}
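A note on the -ESTALE case above: ext2_iget() returns -ESTALE when the on-disk inode turns out to be deleted (zero link count), so a directory entry still pointing at it means the filesystem metadata is corrupt. The hunk therefore logs the inode number through ext2_error() and reports -EIO, rather than letting the internal -ESTALE, which has NFS-specific meaning, escape to userspace.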
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 3d724a95882..373fa90c796 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -130,8 +130,7 @@ static int ext3_readdir(struct file * filp,
130 struct buffer_head *bh = NULL; 130 struct buffer_head *bh = NULL;
131 131
132 map_bh.b_state = 0; 132 map_bh.b_state = 0;
133 err = ext3_get_blocks_handle(NULL, inode, blk, 1, 133 err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
134 &map_bh, 0, 0);
135 if (err > 0) { 134 if (err > 0) {
136 pgoff_t index = map_bh.b_blocknr >> 135 pgoff_t index = map_bh.b_blocknr >>
137 (PAGE_CACHE_SHIFT - inode->i_blkbits); 136 (PAGE_CACHE_SHIFT - inode->i_blkbits);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 5f51fed5c75..b49908a167a 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -788,7 +788,7 @@ err_out:
788int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, 788int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
789 sector_t iblock, unsigned long maxblocks, 789 sector_t iblock, unsigned long maxblocks,
790 struct buffer_head *bh_result, 790 struct buffer_head *bh_result,
791 int create, int extend_disksize) 791 int create)
792{ 792{
793 int err = -EIO; 793 int err = -EIO;
794 int offsets[4]; 794 int offsets[4];
@@ -911,13 +911,6 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
911 if (!err) 911 if (!err)
912 err = ext3_splice_branch(handle, inode, iblock, 912 err = ext3_splice_branch(handle, inode, iblock,
913 partial, indirect_blks, count); 913 partial, indirect_blks, count);
914 /*
915 * i_disksize growing is protected by truncate_mutex. Don't forget to
916 * protect it if you're about to implement concurrent
917 * ext3_get_block() -bzzz
918 */
919 if (!err && extend_disksize && inode->i_size > ei->i_disksize)
920 ei->i_disksize = inode->i_size;
921 mutex_unlock(&ei->truncate_mutex); 914 mutex_unlock(&ei->truncate_mutex);
922 if (err) 915 if (err)
923 goto cleanup; 916 goto cleanup;
@@ -972,7 +965,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock,
972 } 965 }
973 966
974 ret = ext3_get_blocks_handle(handle, inode, iblock, 967 ret = ext3_get_blocks_handle(handle, inode, iblock,
975 max_blocks, bh_result, create, 0); 968 max_blocks, bh_result, create);
976 if (ret > 0) { 969 if (ret > 0) {
977 bh_result->b_size = (ret << inode->i_blkbits); 970 bh_result->b_size = (ret << inode->i_blkbits);
978 ret = 0; 971 ret = 0;
@@ -1005,7 +998,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
1005 dummy.b_blocknr = -1000; 998 dummy.b_blocknr = -1000;
1006 buffer_trace_init(&dummy.b_history); 999 buffer_trace_init(&dummy.b_history);
1007 err = ext3_get_blocks_handle(handle, inode, block, 1, 1000 err = ext3_get_blocks_handle(handle, inode, block, 1,
1008 &dummy, create, 1); 1001 &dummy, create);
1009 /* 1002 /*
1010 * ext3_get_blocks_handle() returns number of blocks 1003 * ext3_get_blocks_handle() returns number of blocks
1011 * mapped. 0 in case of a HOLE. 1004 * mapped. 0 in case of a HOLE.
@@ -1193,15 +1186,16 @@ write_begin_failed:
1193 * i_size_read because we hold i_mutex. 1186 * i_size_read because we hold i_mutex.
1194 * 1187 *
1195 * Add inode to orphan list in case we crash before truncate 1188 * Add inode to orphan list in case we crash before truncate
1196 * finishes. 1189 * finishes. Do this only if ext3_can_truncate() agrees so
1190 * that orphan processing code is happy.
1197 */ 1191 */
1198 if (pos + len > inode->i_size) 1192 if (pos + len > inode->i_size && ext3_can_truncate(inode))
1199 ext3_orphan_add(handle, inode); 1193 ext3_orphan_add(handle, inode);
1200 ext3_journal_stop(handle); 1194 ext3_journal_stop(handle);
1201 unlock_page(page); 1195 unlock_page(page);
1202 page_cache_release(page); 1196 page_cache_release(page);
1203 if (pos + len > inode->i_size) 1197 if (pos + len > inode->i_size)
1204 vmtruncate(inode, inode->i_size); 1198 ext3_truncate(inode);
1205 } 1199 }
1206 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) 1200 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1207 goto retry; 1201 goto retry;
@@ -1287,7 +1281,7 @@ static int ext3_ordered_write_end(struct file *file,
1287 * There may be allocated blocks outside of i_size because 1281 * There may be allocated blocks outside of i_size because
1288 * we failed to copy some data. Prepare for truncate. 1282 * we failed to copy some data. Prepare for truncate.
1289 */ 1283 */
1290 if (pos + len > inode->i_size) 1284 if (pos + len > inode->i_size && ext3_can_truncate(inode))
1291 ext3_orphan_add(handle, inode); 1285 ext3_orphan_add(handle, inode);
1292 ret2 = ext3_journal_stop(handle); 1286 ret2 = ext3_journal_stop(handle);
1293 if (!ret) 1287 if (!ret)
@@ -1296,7 +1290,7 @@ static int ext3_ordered_write_end(struct file *file,
1296 page_cache_release(page); 1290 page_cache_release(page);
1297 1291
1298 if (pos + len > inode->i_size) 1292 if (pos + len > inode->i_size)
1299 vmtruncate(inode, inode->i_size); 1293 ext3_truncate(inode);
1300 return ret ? ret : copied; 1294 return ret ? ret : copied;
1301} 1295}
1302 1296
@@ -1315,14 +1309,14 @@ static int ext3_writeback_write_end(struct file *file,
1315 * There may be allocated blocks outside of i_size because 1309 * There may be allocated blocks outside of i_size because
1316 * we failed to copy some data. Prepare for truncate. 1310 * we failed to copy some data. Prepare for truncate.
1317 */ 1311 */
1318 if (pos + len > inode->i_size) 1312 if (pos + len > inode->i_size && ext3_can_truncate(inode))
1319 ext3_orphan_add(handle, inode); 1313 ext3_orphan_add(handle, inode);
1320 ret = ext3_journal_stop(handle); 1314 ret = ext3_journal_stop(handle);
1321 unlock_page(page); 1315 unlock_page(page);
1322 page_cache_release(page); 1316 page_cache_release(page);
1323 1317
1324 if (pos + len > inode->i_size) 1318 if (pos + len > inode->i_size)
1325 vmtruncate(inode, inode->i_size); 1319 ext3_truncate(inode);
1326 return ret ? ret : copied; 1320 return ret ? ret : copied;
1327} 1321}
1328 1322
@@ -1358,7 +1352,7 @@ static int ext3_journalled_write_end(struct file *file,
1358 * There may be allocated blocks outside of i_size because 1352 * There may be allocated blocks outside of i_size because
1359 * we failed to copy some data. Prepare for truncate. 1353 * we failed to copy some data. Prepare for truncate.
1360 */ 1354 */
1361 if (pos + len > inode->i_size) 1355 if (pos + len > inode->i_size && ext3_can_truncate(inode))
1362 ext3_orphan_add(handle, inode); 1356 ext3_orphan_add(handle, inode);
1363 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; 1357 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1364 if (inode->i_size > EXT3_I(inode)->i_disksize) { 1358 if (inode->i_size > EXT3_I(inode)->i_disksize) {
@@ -1375,7 +1369,7 @@ static int ext3_journalled_write_end(struct file *file,
1375 page_cache_release(page); 1369 page_cache_release(page);
1376 1370
1377 if (pos + len > inode->i_size) 1371 if (pos + len > inode->i_size)
1378 vmtruncate(inode, inode->i_size); 1372 ext3_truncate(inode);
1379 return ret ? ret : copied; 1373 return ret ? ret : copied;
1380} 1374}
1381 1375
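All four ext3 hunks above converge on the same short-copy error path: put the inode on the orphan list only when ext3_can_truncate() agrees (so orphan processing never sees an untruncatable inode), then trim blocks allocated beyond i_size with ext3_truncate() instead of vmtruncate(). Assembled from the hunks, the common tail of a write_end handler now reads:

	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);	/* crash-safe record of pending cleanup */
	ret = ext3_journal_stop(handle);
	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size)
		ext3_truncate(inode);	/* trims the excess and drops the orphan entry */
	return ret ? ret : copied;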
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0ddf7e55abe..9714db393ef 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -93,20 +93,20 @@ typedef unsigned int ext4_group_t;
93struct ext4_allocation_request { 93struct ext4_allocation_request {
94 /* target inode for block we're allocating */ 94 /* target inode for block we're allocating */
95 struct inode *inode; 95 struct inode *inode;
96 /* how many blocks we want to allocate */
97 unsigned int len;
96 /* logical block in target inode */ 98 /* logical block in target inode */
97 ext4_lblk_t logical; 99 ext4_lblk_t logical;
98 /* phys. target (a hint) */
99 ext4_fsblk_t goal;
100 /* the closest logical allocated block to the left */ 100 /* the closest logical allocated block to the left */
101 ext4_lblk_t lleft; 101 ext4_lblk_t lleft;
102 /* phys. block for ^^^ */
103 ext4_fsblk_t pleft;
104 /* the closest logical allocated block to the right */ 102 /* the closest logical allocated block to the right */
105 ext4_lblk_t lright; 103 ext4_lblk_t lright;
106 /* phys. block for ^^^ */ 104 /* phys. target (a hint) */
105 ext4_fsblk_t goal;
106 /* phys. block for the closest logical allocated block to the left */
107 ext4_fsblk_t pleft;
108 /* phys. block for the closest logical allocated block to the right */
107 ext4_fsblk_t pright; 109 ext4_fsblk_t pright;
108 /* how many blocks we want to allocate */
109 unsigned int len;
110 /* flags. see above EXT4_MB_HINT_* */ 110 /* flags. see above EXT4_MB_HINT_* */
111 unsigned int flags; 111 unsigned int flags;
112}; 112};
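The struct above only changes field order and comments; apparently the 32-bit ext4_lblk_t members are grouped together and the 64-bit ext4_fsblk_t members together for tighter packing, with "len" moving up beside "inode". Callers fill it as before; an illustrative fragment (surrounding declarations omitted, values made up):

	struct ext4_allocation_request ar = {
		.inode   = inode,
		.len     = 8,		/* want 8 blocks */
		.logical = lblk,	/* logical block in the file */
		.goal    = goal,	/* physical placement hint */
	};
	int err;
	ext4_fsblk_t block = ext4_mb_new_blocks(handle, &ar, &err);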
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index ad13a84644e..eb27fd0f2ee 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -43,6 +43,8 @@ int __ext4_journal_forget(const char *where, handle_t *handle,
43 ext4_journal_abort_handle(where, __func__, bh, 43 ext4_journal_abort_handle(where, __func__, bh,
44 handle, err); 44 handle, err);
45 } 45 }
46 else
47 brelse(bh);
46 return err; 48 return err;
47} 49}
48 50
@@ -57,6 +59,8 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
57 ext4_journal_abort_handle(where, __func__, bh, 59 ext4_journal_abort_handle(where, __func__, bh,
58 handle, err); 60 handle, err);
59 } 61 }
62 else
63 brelse(bh);
60 return err; 64 return err;
61} 65}
62 66
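Both wrappers above gain the same two lines: when running without a valid journal handle they now drop the buffer reference themselves instead of silently leaking it (see the matching comments added to ext4_jbd2.h below). A plausible reconstruction of one full wrapper after this patch:

int __ext4_journal_forget(const char *where, handle_t *handle,
			  struct buffer_head *bh)
{
	int err = 0;

	if (ext4_handle_valid(handle)) {
		err = jbd2_journal_forget(handle, bh);
		if (err)
			ext4_journal_abort_handle(where, __func__, bh,
						  handle, err);
	} else {
		brelse(bh);	/* no journal: still release the buffer head */
	}
	return err;
}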
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index be2f426f680..139fb8cb87e 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -131,9 +131,11 @@ int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
131int __ext4_journal_get_write_access(const char *where, handle_t *handle, 131int __ext4_journal_get_write_access(const char *where, handle_t *handle,
132 struct buffer_head *bh); 132 struct buffer_head *bh);
133 133
134/* When called with an invalid handle, this will still do a put on the BH */
134int __ext4_journal_forget(const char *where, handle_t *handle, 135int __ext4_journal_forget(const char *where, handle_t *handle,
135 struct buffer_head *bh); 136 struct buffer_head *bh);
136 137
138/* When called with an invalid handle, this will still do a put on the BH */
137int __ext4_journal_revoke(const char *where, handle_t *handle, 139int __ext4_journal_revoke(const char *where, handle_t *handle,
138 ext4_fsblk_t blocknr, struct buffer_head *bh); 140 ext4_fsblk_t blocknr, struct buffer_head *bh);
139 141
@@ -281,10 +283,10 @@ static inline int ext4_should_order_data(struct inode *inode)
281 283
282static inline int ext4_should_writeback_data(struct inode *inode) 284static inline int ext4_should_writeback_data(struct inode *inode)
283{ 285{
284 if (EXT4_JOURNAL(inode) == NULL)
285 return 0;
286 if (!S_ISREG(inode->i_mode)) 286 if (!S_ISREG(inode->i_mode))
287 return 0; 287 return 0;
288 if (EXT4_JOURNAL(inode) == NULL)
289 return 1;
288 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) 290 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
289 return 0; 291 return 0;
290 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 292 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 50322a09bd0..73ebfb44ad7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1977,6 +1977,7 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
1977 */ 1977 */
1978 /* 1 bitmap, 1 block group descriptor */ 1978 /* 1 bitmap, 1 block group descriptor */
1979 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 1979 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
1980 return ret;
1980 } 1981 }
1981 } 1982 }
1982 1983
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2f645732e3b..29e6dc7299b 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -833,7 +833,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
833 if (!goal) 833 if (!goal)
834 goal = sbi->s_inode_goal; 834 goal = sbi->s_inode_goal;
835 835
836 if (goal && goal < le32_to_cpu(sbi->s_es->s_inodes_count)) { 836 if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
837 group = (goal - 1) / EXT4_INODES_PER_GROUP(sb); 837 group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
838 ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb); 838 ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
839 ret2 = 0; 839 ret2 = 0;
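A worked example of why the comparison above must be <=: inode numbers are 1-based, so goal == s_inodes_count names the last valid inode. With, say, s_inodes_count == 32768 and EXT4_INODES_PER_GROUP(sb) == 8192:

	group = (32768 - 1) / 8192;	/* = 3, the last group */
	ino   = (32768 - 1) % 8192;	/* = 8191, the last slot in that group */

The old strict "<" wrongly rejected this legitimate goal.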
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 60a26f3a6f8..f9c642b22ef 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -78,16 +78,14 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
78 * but there may still be a record of it in the journal, and that record 78 * but there may still be a record of it in the journal, and that record
79 * still needs to be revoked. 79 * still needs to be revoked.
80 * 80 *
81 * If the handle isn't valid we're not journaling so there's nothing to do. 81 * If the handle isn't valid we're not journaling, but we still need to
82 * call into ext4_journal_revoke() to put the buffer head.
82 */ 83 */
83int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, 84int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
84 struct buffer_head *bh, ext4_fsblk_t blocknr) 85 struct buffer_head *bh, ext4_fsblk_t blocknr)
85{ 86{
86 int err; 87 int err;
87 88
88 if (!ext4_handle_valid(handle))
89 return 0;
90
91 might_sleep(); 89 might_sleep();
92 90
93 BUFFER_TRACE(bh, "enter"); 91 BUFFER_TRACE(bh, "enter");
@@ -1513,14 +1511,14 @@ retry:
1513 * Add inode to orphan list in case we crash before 1511 * Add inode to orphan list in case we crash before
1514 * truncate finishes 1512 * truncate finishes
1515 */ 1513 */
1516 if (pos + len > inode->i_size) 1514 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1517 ext4_orphan_add(handle, inode); 1515 ext4_orphan_add(handle, inode);
1518 1516
1519 ext4_journal_stop(handle); 1517 ext4_journal_stop(handle);
1520 if (pos + len > inode->i_size) { 1518 if (pos + len > inode->i_size) {
1521 vmtruncate(inode, inode->i_size); 1519 ext4_truncate(inode);
1522 /* 1520 /*
1523 * If vmtruncate failed early the inode might 1521 * If truncate failed early the inode might
1524 * still be on the orphan list; we need to 1522 * still be on the orphan list; we need to
1525 * make sure the inode is removed from the 1523 * make sure the inode is removed from the
1526 * orphan list in that case. 1524 * orphan list in that case.
@@ -1614,7 +1612,7 @@ static int ext4_ordered_write_end(struct file *file,
1614 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, 1612 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1615 page, fsdata); 1613 page, fsdata);
1616 copied = ret2; 1614 copied = ret2;
1617 if (pos + len > inode->i_size) 1615 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1618 /* if we have allocated more blocks and copied 1616 /* if we have allocated more blocks and copied
1619 * less. We will have blocks allocated outside 1617 * less. We will have blocks allocated outside
1620 * inode->i_size. So truncate them 1618 * inode->i_size. So truncate them
@@ -1628,9 +1626,9 @@ static int ext4_ordered_write_end(struct file *file,
1628 ret = ret2; 1626 ret = ret2;
1629 1627
1630 if (pos + len > inode->i_size) { 1628 if (pos + len > inode->i_size) {
1631 vmtruncate(inode, inode->i_size); 1629 ext4_truncate(inode);
1632 /* 1630 /*
1633 * If vmtruncate failed early the inode might still be 1631 * If truncate failed early the inode might still be
1634 * on the orphan list; we need to make sure the inode 1632 * on the orphan list; we need to make sure the inode
1635 * is removed from the orphan list in that case. 1633 * is removed from the orphan list in that case.
1636 */ 1634 */
@@ -1655,7 +1653,7 @@ static int ext4_writeback_write_end(struct file *file,
1655 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, 1653 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1656 page, fsdata); 1654 page, fsdata);
1657 copied = ret2; 1655 copied = ret2;
1658 if (pos + len > inode->i_size) 1656 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1659 /* if we have allocated more blocks and copied 1657 /* if we have allocated more blocks and copied
1660 * less. We will have blocks allocated outside 1658 * less. We will have blocks allocated outside
1661 * inode->i_size. So truncate them 1659 * inode->i_size. So truncate them
@@ -1670,9 +1668,9 @@ static int ext4_writeback_write_end(struct file *file,
1670 ret = ret2; 1668 ret = ret2;
1671 1669
1672 if (pos + len > inode->i_size) { 1670 if (pos + len > inode->i_size) {
1673 vmtruncate(inode, inode->i_size); 1671 ext4_truncate(inode);
1674 /* 1672 /*
1675 * If vmtruncate failed early the inode might still be 1673 * If truncate failed early the inode might still be
1676 * on the orphan list; we need to make sure the inode 1674 * on the orphan list; we need to make sure the inode
1677 * is removed from the orphan list in that case. 1675 * is removed from the orphan list in that case.
1678 */ 1676 */
@@ -1722,7 +1720,7 @@ static int ext4_journalled_write_end(struct file *file,
1722 1720
1723 unlock_page(page); 1721 unlock_page(page);
1724 page_cache_release(page); 1722 page_cache_release(page);
1725 if (pos + len > inode->i_size) 1723 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1726 /* if we have allocated more blocks and copied 1724 /* if we have allocated more blocks and copied
1727 * less. We will have blocks allocated outside 1725 * less. We will have blocks allocated outside
1728 * inode->i_size. So truncate them 1726 * inode->i_size. So truncate them
@@ -1733,9 +1731,9 @@ static int ext4_journalled_write_end(struct file *file,
1733 if (!ret) 1731 if (!ret)
1734 ret = ret2; 1732 ret = ret2;
1735 if (pos + len > inode->i_size) { 1733 if (pos + len > inode->i_size) {
1736 vmtruncate(inode, inode->i_size); 1734 ext4_truncate(inode);
1737 /* 1735 /*
1738 * If vmtruncate failed early the inode might still be 1736 * If truncate failed early the inode might still be
1739 * on the orphan list; we need to make sure the inode 1737 * on the orphan list; we need to make sure the inode
1740 * is removed from the orphan list in that case. 1738 * is removed from the orphan list in that case.
1741 */ 1739 */
@@ -2305,15 +2303,9 @@ flush_it:
2305 return; 2303 return;
2306} 2304}
2307 2305
2308static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh) 2306static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2309{ 2307{
2310 /* 2308 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2311 * unmapped buffer is possible for holes.
2312 * delay buffer is possible with delayed allocation.
2313 * We also need to consider unwritten buffer as unmapped.
2314 */
2315 return (!buffer_mapped(bh) || buffer_delay(bh) ||
2316 buffer_unwritten(bh)) && buffer_dirty(bh);
2317} 2309}
2318 2310
2319/* 2311/*
@@ -2398,9 +2390,9 @@ static int __mpage_da_writepage(struct page *page,
2398 * We need to try to allocate 2390 * We need to try to allocate
2399 * unmapped blocks in the same page. 2391 * unmapped blocks in the same page.
2400 * Otherwise we won't make progress 2392 * Otherwise we won't make progress
2401 * with the page in ext4_da_writepage 2393 * with the page in ext4_writepage
2402 */ 2394 */
2403 if (ext4_bh_unmapped_or_delay(NULL, bh)) { 2395 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2404 mpage_add_bh_to_extent(mpd, logical, 2396 mpage_add_bh_to_extent(mpd, logical,
2405 bh->b_size, 2397 bh->b_size,
2406 bh->b_state); 2398 bh->b_state);
@@ -2517,7 +2509,6 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2517 * so call get_block_wrap with create = 0 2509 * so call get_block_wrap with create = 0
2518 */ 2510 */
2519 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); 2511 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
2520 BUG_ON(create && ret == 0);
2521 if (ret > 0) { 2512 if (ret > 0) {
2522 bh_result->b_size = (ret << inode->i_blkbits); 2513 bh_result->b_size = (ret << inode->i_blkbits);
2523 ret = 0; 2514 ret = 0;
@@ -2525,15 +2516,102 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2525 return ret; 2516 return ret;
2526} 2517}
2527 2518
2519static int bget_one(handle_t *handle, struct buffer_head *bh)
2520{
2521 get_bh(bh);
2522 return 0;
2523}
2524
2525static int bput_one(handle_t *handle, struct buffer_head *bh)
2526{
2527 put_bh(bh);
2528 return 0;
2529}
2530
2531static int __ext4_journalled_writepage(struct page *page,
2532 struct writeback_control *wbc,
2533 unsigned int len)
2534{
2535 struct address_space *mapping = page->mapping;
2536 struct inode *inode = mapping->host;
2537 struct buffer_head *page_bufs;
2538 handle_t *handle = NULL;
2539 int ret = 0;
2540 int err;
2541
2542 page_bufs = page_buffers(page);
2543 BUG_ON(!page_bufs);
2544 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
2545 /* As soon as we unlock the page, it can go away, but we have
2546 * references to buffers so we are safe */
2547 unlock_page(page);
2548
2549 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2550 if (IS_ERR(handle)) {
2551 ret = PTR_ERR(handle);
2552 goto out;
2553 }
2554
2555 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2556 do_journal_get_write_access);
2557
2558 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2559 write_end_fn);
2560 if (ret == 0)
2561 ret = err;
2562 err = ext4_journal_stop(handle);
2563 if (!ret)
2564 ret = err;
2565
2566 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2567 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
2568out:
2569 return ret;
2570}
2571
2528/* 2572/*
2573 * Note that we don't need to start a transaction unless we're journaling data
2574 * because we should have holes filled from ext4_page_mkwrite(). We don't even
2575 * need to file the inode to the transaction's list in ordered mode because if
2576 * we are writing back data added by write(), the inode is already there and if
2577 * we are writing back data modified via mmap(), no one guarantees in which
2578 * transaction the data will hit the disk. In case we are journaling data, we
2579 * cannot start transaction directly because transaction start ranks above page
2580 * lock so we have to do some magic.
2581 *
2529 * This function can get called via... 2582 * This function can get called via...
2530 * - ext4_da_writepages after taking page lock (have journal handle) 2583 * - ext4_da_writepages after taking page lock (have journal handle)
2531 * - journal_submit_inode_data_buffers (no journal handle) 2584 * - journal_submit_inode_data_buffers (no journal handle)
2532 * - shrink_page_list via pdflush (no journal handle) 2585 * - shrink_page_list via pdflush (no journal handle)
2533 * - grab_page_cache when doing write_begin (have journal handle) 2586 * - grab_page_cache when doing write_begin (have journal handle)
2587 *
2588 * We don't do any block allocation in this function. If we have a page with
2589 * multiple blocks we need to write those buffer_heads that are mapped. This
2590 * is important for mmap-based writes. For example, with blocksize 1K:
2591 * truncate(f, 1024);
2592 * a = mmap(f, 0, 4096);
2593 * a[0] = 'a';
2594 * truncate(f, 4096);
2595 * we have in the page the first buffer_head mapped via the page_mkwrite callback
2596 * but other buffer_heads would be unmapped but dirty (dirtying done via
2597 * do_wp_page). So writepage should write the first block. If we modify
2598 * the mmap area beyond 1024 we will again get a page_fault and the
2599 * page_mkwrite callback will do the block allocation and mark the
2600 * buffer_heads mapped.
2601 *
2602 * We redirty the page if we have any buffer_heads that are either delay or
2603 * unwritten in the page.
2604 *
2605 * We can get recursively called as shown below.
2606 *
2607 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2608 * ext4_writepage()
2609 *
2610 * But since we don't do any block allocation we should not deadlock.
2611 * The page also has the dirty flag cleared so we don't get recursive page_lock.
2534 */ 2612 */
2535static int ext4_da_writepage(struct page *page, 2613static int ext4_writepage(struct page *page,
2536 struct writeback_control *wbc) 2614 struct writeback_control *wbc)
2537{ 2615{
2538 int ret = 0; 2616 int ret = 0;
2539 loff_t size; 2617 loff_t size;
@@ -2541,7 +2619,7 @@ static int ext4_da_writepage(struct page *page,
2541 struct buffer_head *page_bufs; 2619 struct buffer_head *page_bufs;
2542 struct inode *inode = page->mapping->host; 2620 struct inode *inode = page->mapping->host;
2543 2621
2544 trace_ext4_da_writepage(inode, page); 2622 trace_ext4_writepage(inode, page);
2545 size = i_size_read(inode); 2623 size = i_size_read(inode);
2546 if (page->index == size >> PAGE_CACHE_SHIFT) 2624 if (page->index == size >> PAGE_CACHE_SHIFT)
2547 len = size & ~PAGE_CACHE_MASK; 2625 len = size & ~PAGE_CACHE_MASK;
@@ -2551,7 +2629,7 @@ static int ext4_da_writepage(struct page *page,
2551 if (page_has_buffers(page)) { 2629 if (page_has_buffers(page)) {
2552 page_bufs = page_buffers(page); 2630 page_bufs = page_buffers(page);
2553 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2631 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2554 ext4_bh_unmapped_or_delay)) { 2632 ext4_bh_delay_or_unwritten)) {
2555 /* 2633 /*
2556 * We don't want to do block allocation 2634 * We don't want to do block allocation
2557 * So redirty the page and return 2635 * So redirty the page and return
@@ -2578,13 +2656,13 @@ static int ext4_da_writepage(struct page *page,
2578 * all are mapped and non delay. We don't want to 2656 * all are mapped and non delay. We don't want to
2579 * do block allocation here. 2657 * do block allocation here.
2580 */ 2658 */
2581 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 2659 ret = block_prepare_write(page, 0, len,
2582 noalloc_get_block_write); 2660 noalloc_get_block_write);
2583 if (!ret) { 2661 if (!ret) {
2584 page_bufs = page_buffers(page); 2662 page_bufs = page_buffers(page);
2585 /* check whether all are mapped and non delay */ 2663 /* check whether all are mapped and non delay */
2586 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2664 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2587 ext4_bh_unmapped_or_delay)) { 2665 ext4_bh_delay_or_unwritten)) {
2588 redirty_page_for_writepage(wbc, page); 2666 redirty_page_for_writepage(wbc, page);
2589 unlock_page(page); 2667 unlock_page(page);
2590 return 0; 2668 return 0;
@@ -2600,7 +2678,16 @@ static int ext4_da_writepage(struct page *page,
2600 return 0; 2678 return 0;
2601 } 2679 }
2602 /* now mark the buffer_heads as dirty and uptodate */ 2680 /* now mark the buffer_heads as dirty and uptodate */
2603 block_commit_write(page, 0, PAGE_CACHE_SIZE); 2681 block_commit_write(page, 0, len);
2682 }
2683
2684 if (PageChecked(page) && ext4_should_journal_data(inode)) {
2685 /*
2686 * It's mmapped pagecache. Add buffers and journal it. There
2687 * doesn't seem much point in redirtying the page here.
2688 */
2689 ClearPageChecked(page);
2690 return __ext4_journalled_writepage(page, wbc, len);
2604 } 2691 }
2605 2692
2606 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) 2693 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -2907,7 +2994,7 @@ retry:
2907 * i_size_read because we hold i_mutex. 2994 * i_size_read because we hold i_mutex.
2908 */ 2995 */
2909 if (pos + len > inode->i_size) 2996 if (pos + len > inode->i_size)
2910 vmtruncate(inode, inode->i_size); 2997 ext4_truncate(inode);
2911 } 2998 }
2912 2999
2913 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3000 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3130,222 +3217,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3130 return generic_block_bmap(mapping, block, ext4_get_block); 3217 return generic_block_bmap(mapping, block, ext4_get_block);
3131} 3218}
3132 3219
3133static int bget_one(handle_t *handle, struct buffer_head *bh)
3134{
3135 get_bh(bh);
3136 return 0;
3137}
3138
3139static int bput_one(handle_t *handle, struct buffer_head *bh)
3140{
3141 put_bh(bh);
3142 return 0;
3143}
3144
3145/*
3146 * Note that we don't need to start a transaction unless we're journaling data
3147 * because we should have holes filled from ext4_page_mkwrite(). We even don't
3148 * need to file the inode to the transaction's list in ordered mode because if
3149 * we are writing back data added by write(), the inode is already there and if
3150 * we are writing back data modified via mmap(), noone guarantees in which
3151 * transaction the data will hit the disk. In case we are journaling data, we
3152 * cannot start transaction directly because transaction start ranks above page
3153 * lock so we have to do some magic.
3154 *
3155 * In all journaling modes block_write_full_page() will start the I/O.
3156 *
3157 * Problem:
3158 *
3159 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
3160 * ext4_writepage()
3161 *
3162 * Similar for:
3163 *
3164 * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
3165 *
3166 * Same applies to ext4_get_block(). We will deadlock on various things like
3167 * lock_journal and i_data_sem
3168 *
3169 * Setting PF_MEMALLOC here doesn't work - too many internal memory
3170 * allocations fail.
3171 *
3172 * 16May01: If we're reentered then journal_current_handle() will be
3173 * non-zero. We simply *return*.
3174 *
3175 * 1 July 2001: @@@ FIXME:
3176 * In journalled data mode, a data buffer may be metadata against the
3177 * current transaction. But the same file is part of a shared mapping
3178 * and someone does a writepage() on it.
3179 *
3180 * We will move the buffer onto the async_data list, but *after* it has
3181 * been dirtied. So there's a small window where we have dirty data on
3182 * BJ_Metadata.
3183 *
3184 * Note that this only applies to the last partial page in the file. The
3185 * bit which block_write_full_page() uses prepare/commit for. (That's
3186 * broken code anyway: it's wrong for msync()).
3187 *
3188 * It's a rare case: affects the final partial page, for journalled data
3189 * where the file is subject to bith write() and writepage() in the same
3190 * transction. To fix it we'll need a custom block_write_full_page().
3191 * We'll probably need that anyway for journalling writepage() output.
3192 *
3193 * We don't honour synchronous mounts for writepage(). That would be
3194 * disastrous. Any write() or metadata operation will sync the fs for
3195 * us.
3196 *
3197 */
3198static int __ext4_normal_writepage(struct page *page,
3199 struct writeback_control *wbc)
3200{
3201 struct inode *inode = page->mapping->host;
3202
3203 if (test_opt(inode->i_sb, NOBH))
3204 return nobh_writepage(page, noalloc_get_block_write, wbc);
3205 else
3206 return block_write_full_page(page, noalloc_get_block_write,
3207 wbc);
3208}
3209
3210static int ext4_normal_writepage(struct page *page,
3211 struct writeback_control *wbc)
3212{
3213 struct inode *inode = page->mapping->host;
3214 loff_t size = i_size_read(inode);
3215 loff_t len;
3216
3217 trace_ext4_normal_writepage(inode, page);
3218 J_ASSERT(PageLocked(page));
3219 if (page->index == size >> PAGE_CACHE_SHIFT)
3220 len = size & ~PAGE_CACHE_MASK;
3221 else
3222 len = PAGE_CACHE_SIZE;
3223
3224 if (page_has_buffers(page)) {
3225 /* if page has buffers it should all be mapped
3226 * and allocated. If there are not buffers attached
3227 * to the page we know the page is dirty but it lost
3228 * buffers. That means that at some moment in time
3229 * after write_begin() / write_end() has been called
3230 * all buffers have been clean and thus they must have been
3231 * written at least once. So they are all mapped and we can
3232 * happily proceed with mapping them and writing the page.
3233 */
3234 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3235 ext4_bh_unmapped_or_delay));
3236 }
3237
3238 if (!ext4_journal_current_handle())
3239 return __ext4_normal_writepage(page, wbc);
3240
3241 redirty_page_for_writepage(wbc, page);
3242 unlock_page(page);
3243 return 0;
3244}
3245
3246static int __ext4_journalled_writepage(struct page *page,
3247 struct writeback_control *wbc)
3248{
3249 struct address_space *mapping = page->mapping;
3250 struct inode *inode = mapping->host;
3251 struct buffer_head *page_bufs;
3252 handle_t *handle = NULL;
3253 int ret = 0;
3254 int err;
3255
3256 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
3257 noalloc_get_block_write);
3258 if (ret != 0)
3259 goto out_unlock;
3260
3261 page_bufs = page_buffers(page);
3262 walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
3263 bget_one);
3264 /* As soon as we unlock the page, it can go away, but we have
3265 * references to buffers so we are safe */
3266 unlock_page(page);
3267
3268 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
3269 if (IS_ERR(handle)) {
3270 ret = PTR_ERR(handle);
3271 goto out;
3272 }
3273
3274 ret = walk_page_buffers(handle, page_bufs, 0,
3275 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
3276
3277 err = walk_page_buffers(handle, page_bufs, 0,
3278 PAGE_CACHE_SIZE, NULL, write_end_fn);
3279 if (ret == 0)
3280 ret = err;
3281 err = ext4_journal_stop(handle);
3282 if (!ret)
3283 ret = err;
3284
3285 walk_page_buffers(handle, page_bufs, 0,
3286 PAGE_CACHE_SIZE, NULL, bput_one);
3287 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
3288 goto out;
3289
3290out_unlock:
3291 unlock_page(page);
3292out:
3293 return ret;
3294}
3295
3296static int ext4_journalled_writepage(struct page *page,
3297 struct writeback_control *wbc)
3298{
3299 struct inode *inode = page->mapping->host;
3300 loff_t size = i_size_read(inode);
3301 loff_t len;
3302
3303 trace_ext4_journalled_writepage(inode, page);
3304 J_ASSERT(PageLocked(page));
3305 if (page->index == size >> PAGE_CACHE_SHIFT)
3306 len = size & ~PAGE_CACHE_MASK;
3307 else
3308 len = PAGE_CACHE_SIZE;
3309
3310 if (page_has_buffers(page)) {
3311 /* if page has buffers it should all be mapped
3312 * and allocated. If there are not buffers attached
3313 * to the page we know the page is dirty but it lost
3314 * buffers. That means that at some moment in time
3315 * after write_begin() / write_end() has been called
3316 * all buffers have been clean and thus they must have been
3317 * written at least once. So they are all mapped and we can
3318 * happily proceed with mapping them and writing the page.
3319 */
3320 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3321 ext4_bh_unmapped_or_delay));
3322 }
3323
3324 if (ext4_journal_current_handle())
3325 goto no_write;
3326
3327 if (PageChecked(page)) {
3328 /*
3329 * It's mmapped pagecache. Add buffers and journal it. There
3330 * doesn't seem much point in redirtying the page here.
3331 */
3332 ClearPageChecked(page);
3333 return __ext4_journalled_writepage(page, wbc);
3334 } else {
3335 /*
3336 * It may be a page full of checkpoint-mode buffers. We don't
3337 * really know unless we go poke around in the buffer_heads.
3338 * But block_write_full_page will do the right thing.
3339 */
3340 return block_write_full_page(page, noalloc_get_block_write,
3341 wbc);
3342 }
3343no_write:
3344 redirty_page_for_writepage(wbc, page);
3345 unlock_page(page);
3346 return 0;
3347}
3348
3349static int ext4_readpage(struct file *file, struct page *page) 3220static int ext4_readpage(struct file *file, struct page *page)
3350{ 3221{
3351 return mpage_readpage(page, ext4_get_block); 3222 return mpage_readpage(page, ext4_get_block);
@@ -3492,7 +3363,7 @@ static int ext4_journalled_set_page_dirty(struct page *page)
3492static const struct address_space_operations ext4_ordered_aops = { 3363static const struct address_space_operations ext4_ordered_aops = {
3493 .readpage = ext4_readpage, 3364 .readpage = ext4_readpage,
3494 .readpages = ext4_readpages, 3365 .readpages = ext4_readpages,
3495 .writepage = ext4_normal_writepage, 3366 .writepage = ext4_writepage,
3496 .sync_page = block_sync_page, 3367 .sync_page = block_sync_page,
3497 .write_begin = ext4_write_begin, 3368 .write_begin = ext4_write_begin,
3498 .write_end = ext4_ordered_write_end, 3369 .write_end = ext4_ordered_write_end,
@@ -3507,7 +3378,7 @@ static const struct address_space_operations ext4_ordered_aops = {
3507static const struct address_space_operations ext4_writeback_aops = { 3378static const struct address_space_operations ext4_writeback_aops = {
3508 .readpage = ext4_readpage, 3379 .readpage = ext4_readpage,
3509 .readpages = ext4_readpages, 3380 .readpages = ext4_readpages,
3510 .writepage = ext4_normal_writepage, 3381 .writepage = ext4_writepage,
3511 .sync_page = block_sync_page, 3382 .sync_page = block_sync_page,
3512 .write_begin = ext4_write_begin, 3383 .write_begin = ext4_write_begin,
3513 .write_end = ext4_writeback_write_end, 3384 .write_end = ext4_writeback_write_end,
@@ -3522,7 +3393,7 @@ static const struct address_space_operations ext4_writeback_aops = {
3522static const struct address_space_operations ext4_journalled_aops = { 3393static const struct address_space_operations ext4_journalled_aops = {
3523 .readpage = ext4_readpage, 3394 .readpage = ext4_readpage,
3524 .readpages = ext4_readpages, 3395 .readpages = ext4_readpages,
3525 .writepage = ext4_journalled_writepage, 3396 .writepage = ext4_writepage,
3526 .sync_page = block_sync_page, 3397 .sync_page = block_sync_page,
3527 .write_begin = ext4_write_begin, 3398 .write_begin = ext4_write_begin,
3528 .write_end = ext4_journalled_write_end, 3399 .write_end = ext4_journalled_write_end,
@@ -3536,7 +3407,7 @@ static const struct address_space_operations ext4_journalled_aops = {
3536static const struct address_space_operations ext4_da_aops = { 3407static const struct address_space_operations ext4_da_aops = {
3537 .readpage = ext4_readpage, 3408 .readpage = ext4_readpage,
3538 .readpages = ext4_readpages, 3409 .readpages = ext4_readpages,
3539 .writepage = ext4_da_writepage, 3410 .writepage = ext4_writepage,
3540 .writepages = ext4_da_writepages, 3411 .writepages = ext4_da_writepages,
3541 .sync_page = block_sync_page, 3412 .sync_page = block_sync_page,
3542 .write_begin = ext4_da_write_begin, 3413 .write_begin = ext4_da_write_begin,
@@ -3583,7 +3454,8 @@ int ext4_block_truncate_page(handle_t *handle,
3583 struct page *page; 3454 struct page *page;
3584 int err = 0; 3455 int err = 0;
3585 3456
3586 page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT); 3457 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3458 mapping_gfp_mask(mapping) & ~__GFP_FS);
3587 if (!page) 3459 if (!page)
3588 return -EINVAL; 3460 return -EINVAL;
3589 3461
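Taken together, the inode.c hunks above collapse the old per-mode callbacks (ext4_da_writepage, ext4_normal_writepage, ext4_journalled_writepage) into the single ext4_writepage() now wired into all four address_space_operations tables, with data=journal pages detected at run time via PageChecked(). The relocated __ext4_journalled_writepage() also pins pages differently: rather than block_prepare_write() over PAGE_CACHE_SIZE, it takes a reference on each buffer_head before dropping the page lock, in outline:

	page_bufs = page_buffers(page);
	walk_page_buffers(NULL, page_bufs, 0, len, NULL, bget_one);
	unlock_page(page);	/* the page may be reclaimed from here on... */
	/* ...but the buffer references keep the data pinned until bput_one */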
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bb415408fdb..7050a9cd04a 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -12,7 +12,6 @@
12#include <linux/capability.h> 12#include <linux/capability.h>
13#include <linux/time.h> 13#include <linux/time.h>
14#include <linux/compat.h> 14#include <linux/compat.h>
15#include <linux/smp_lock.h>
16#include <linux/mount.h> 15#include <linux/mount.h>
17#include <linux/file.h> 16#include <linux/file.h>
18#include <asm/uaccess.h> 17#include <asm/uaccess.h>
@@ -192,7 +191,7 @@ setversion_out:
192 case EXT4_IOC_GROUP_EXTEND: { 191 case EXT4_IOC_GROUP_EXTEND: {
193 ext4_fsblk_t n_blocks_count; 192 ext4_fsblk_t n_blocks_count;
194 struct super_block *sb = inode->i_sb; 193 struct super_block *sb = inode->i_sb;
195 int err, err2; 194 int err, err2=0;
196 195
197 if (!capable(CAP_SYS_RESOURCE)) 196 if (!capable(CAP_SYS_RESOURCE))
198 return -EPERM; 197 return -EPERM;
@@ -205,9 +204,11 @@ setversion_out:
205 return err; 204 return err;
206 205
207 err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count); 206 err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
208 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); 207 if (EXT4_SB(sb)->s_journal) {
209 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); 208 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
210 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 209 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
210 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
211 }
211 if (err == 0) 212 if (err == 0)
212 err = err2; 213 err = err2;
213 mnt_drop_write(filp->f_path.mnt); 214 mnt_drop_write(filp->f_path.mnt);
@@ -252,7 +253,7 @@ setversion_out:
252 case EXT4_IOC_GROUP_ADD: { 253 case EXT4_IOC_GROUP_ADD: {
253 struct ext4_new_group_data input; 254 struct ext4_new_group_data input;
254 struct super_block *sb = inode->i_sb; 255 struct super_block *sb = inode->i_sb;
255 int err, err2; 256 int err, err2=0;
256 257
257 if (!capable(CAP_SYS_RESOURCE)) 258 if (!capable(CAP_SYS_RESOURCE))
258 return -EPERM; 259 return -EPERM;
@@ -266,9 +267,11 @@ setversion_out:
266 return err; 267 return err;
267 268
268 err = ext4_group_add(sb, &input); 269 err = ext4_group_add(sb, &input);
269 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); 270 if (EXT4_SB(sb)->s_journal) {
270 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); 271 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
271 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 272 err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
273 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
274 }
272 if (err == 0) 275 if (err == 0)
273 err = err2; 276 err = err2;
274 mnt_drop_write(filp->f_path.mnt); 277 mnt_drop_write(filp->f_path.mnt);
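Both ioctl hunks above add the identical guard because a no-journal ext4 mount leaves EXT4_SB(sb)->s_journal == NULL, and the old unconditional jbd2_journal_lock_updates() would oops on it. The shared pattern could equally live in a helper; a sketch (the helper name is hypothetical):

static int ext4_flush_journal_if_present(struct super_block *sb)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	int err = 0;

	if (journal) {			/* NULL on no-journal mounts */
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);
	}
	return err;
}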
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 519a0a686d9..cd258463e2a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -657,7 +657,8 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
657 } 657 }
658} 658}
659 659
660static void ext4_mb_generate_buddy(struct super_block *sb, 660static noinline_for_stack
661void ext4_mb_generate_buddy(struct super_block *sb,
661 void *buddy, void *bitmap, ext4_group_t group) 662 void *buddy, void *bitmap, ext4_group_t group)
662{ 663{
663 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 664 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
@@ -1480,7 +1481,8 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1480 ext4_mb_check_limits(ac, e4b, 0); 1481 ext4_mb_check_limits(ac, e4b, 0);
1481} 1482}
1482 1483
1483static int ext4_mb_try_best_found(struct ext4_allocation_context *ac, 1484static noinline_for_stack
1485int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1484 struct ext4_buddy *e4b) 1486 struct ext4_buddy *e4b)
1485{ 1487{
1486 struct ext4_free_extent ex = ac->ac_b_ex; 1488 struct ext4_free_extent ex = ac->ac_b_ex;
@@ -1507,7 +1509,8 @@ static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1507 return 0; 1509 return 0;
1508} 1510}
1509 1511
1510static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 1512static noinline_for_stack
1513int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1511 struct ext4_buddy *e4b) 1514 struct ext4_buddy *e4b)
1512{ 1515{
1513 ext4_group_t group = ac->ac_g_ex.fe_group; 1516 ext4_group_t group = ac->ac_g_ex.fe_group;
@@ -1566,7 +1569,8 @@ static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1566 * The routine scans buddy structures (not bitmap!) from given order 1569 * The routine scans buddy structures (not bitmap!) from given order
1567 * to max order and tries to find big enough chunk to satisfy the req 1570 * to max order and tries to find big enough chunk to satisfy the req
1568 */ 1571 */
1569static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 1572static noinline_for_stack
1573void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1570 struct ext4_buddy *e4b) 1574 struct ext4_buddy *e4b)
1571{ 1575{
1572 struct super_block *sb = ac->ac_sb; 1576 struct super_block *sb = ac->ac_sb;
@@ -1609,7 +1613,8 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
  * In order to optimize scanning, caller must pass number of
  * free blocks in the group, so the routine can know upper limit.
  */
-static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
 					struct ext4_buddy *e4b)
 {
 	struct super_block *sb = ac->ac_sb;
@@ -1668,7 +1673,8 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
  * we try to find stripe-aligned chunks for stripe-size requests
  * XXX should do so at least for multiples of stripe size as well
  */
-static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
 				 struct ext4_buddy *e4b)
 {
 	struct super_block *sb = ac->ac_sb;
@@ -1831,7 +1837,8 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
 
 }
 
-static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+static noinline_for_stack
+int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
 {
 
 	int ret;
@@ -2902,7 +2909,11 @@ int __init init_ext4_mballoc(void)
 
 void exit_ext4_mballoc(void)
 {
-	/* XXX: synchronize_rcu(); */
+	/*
+	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
+	 * before destroying the slab cache.
+	 */
+	rcu_barrier();
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
 	kmem_cache_destroy(ext4_free_ext_cachep);
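The rcu_barrier() added above waits until every callback already queued with call_rcu() against ext4_pspace_cachep has run; synchronize_rcu() (the old XXX) would only wait out readers, leaving pending callbacks free to touch the slab after kmem_cache_destroy(). A rough userspace analogy of such a callback barrier, built from pthreads (all names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int pending;

static void *deferred_free(void *p)
{
	usleep(1000);			/* the callback runs "later" */
	free(p);
	pthread_mutex_lock(&lock);
	if (--pending == 0)
		pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void call_deferred(void *p)	/* plays the role of call_rcu() */
{
	pthread_t t;
	pthread_mutex_lock(&lock);
	pending++;
	pthread_mutex_unlock(&lock);
	pthread_create(&t, NULL, deferred_free, p);
	pthread_detach(t);
}

static void deferred_barrier(void)	/* plays the role of rcu_barrier() */
{
	pthread_mutex_lock(&lock);
	while (pending)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	call_deferred(malloc(64));
	call_deferred(malloc(64));
	deferred_barrier();		/* only now is pool teardown safe */
	puts("all deferred frees completed; cache can be destroyed");
	return 0;
}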
@@ -3457,7 +3468,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
  * used in in-core bitmap. buddy must be generated from this bitmap
  * Need to be called with ext4 group lock held
  */
-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+static noinline_for_stack
+void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 					ext4_group_t group)
 {
 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
@@ -4215,14 +4227,9 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
 
 	/* set up allocation goals */
+	memset(ac, 0, sizeof(struct ext4_allocation_context));
 	ac->ac_b_ex.fe_logical = ar->logical;
-	ac->ac_b_ex.fe_group = 0;
-	ac->ac_b_ex.fe_start = 0;
-	ac->ac_b_ex.fe_len = 0;
 	ac->ac_status = AC_STATUS_CONTINUE;
-	ac->ac_groups_scanned = 0;
-	ac->ac_ex_scanned = 0;
-	ac->ac_found = 0;
 	ac->ac_sb = sb;
 	ac->ac_inode = ar->inode;
 	ac->ac_o_ex.fe_logical = ar->logical;
@@ -4233,15 +4240,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
 	ac->ac_g_ex.fe_group = group;
 	ac->ac_g_ex.fe_start = block;
 	ac->ac_g_ex.fe_len = len;
-	ac->ac_f_ex.fe_len = 0;
 	ac->ac_flags = ar->flags;
-	ac->ac_2order = 0;
-	ac->ac_criteria = 0;
-	ac->ac_pa = NULL;
-	ac->ac_bitmap_page = NULL;
-	ac->ac_buddy_page = NULL;
-	ac->alloc_semp = NULL;
-	ac->ac_lg = NULL;
 
 	/* we have to define context: we'll we work with a file or
 	 * locality group. this is a policy, actually */
@@ -4509,10 +4508,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	}
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-	if (ac) {
-		ac->ac_sb = sb;
-		ac->ac_inode = ar->inode;
-	} else {
+	if (!ac) {
 		ar->len = 0;
 		*errp = -ENOMEM;
 		goto out1;
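Zeroing the whole allocation context with one memset and then setting only the non-zero fields (previous hunks) is less fragile than the removed field-by-field initialisation, which had to be kept in sync every time a member was added; the hunk just above then drops the now-redundant assignments on the allocation path. A toy sketch of the pattern; the struct here is a stand-in, not the real ext4_allocation_context:

#include <stdio.h>
#include <string.h>

struct alloc_ctx {
	int status;
	int found;
	int flags;
	void *pa;		/* many more fields in the real thing */
};

int main(void)
{
	struct alloc_ctx ac;

	memset(&ac, 0, sizeof(ac));	/* every field starts at 0/NULL */
	ac.status = 1;			/* only non-zero state is set explicitly */
	ac.flags = 0x10;
	printf("found=%d pa=%p\n", ac.found, ac.pa);
	return 0;
}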
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 38ff75a0fe2..530b4ca0151 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -16,7 +16,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/time.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/compat.h>
 #include <asm/uaccess.h>
diff --git a/fs/fat/file.c b/fs/fat/file.c
index b28ea646ff6..f042b965c95 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -134,7 +134,7 @@ static int fat_file_release(struct inode *inode, struct file *filp)
 	if ((filp->f_mode & FMODE_WRITE) &&
 	     MSDOS_SB(inode->i_sb)->options.flush) {
 		fat_flush_inodes(inode->i_sb, inode, NULL);
-		congestion_wait(WRITE, HZ/10);
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 	return 0;
 }
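congestion_wait() now takes BLK_RW_SYNC/BLK_RW_ASYNC instead of READ/WRITE: block-device congestion is tracked per sync/async queue, which is not the same axis as data direction, so reusing the numeric READ/WRITE constants compiled fine but said the wrong thing. A small hedged illustration of why a dedicated enum reads better than reusing 0/1:

#include <stdio.h>

enum blk_rw { BLK_RW_SYNC = 0, BLK_RW_ASYNC = 1 };

/* toy stand-in for congestion_wait(); only the naming is the point */
static void congestion_wait_demo(enum blk_rw rw, int timeout_ms)
{
	printf("waiting on %s congestion for %d ms\n",
	       rw == BLK_RW_SYNC ? "sync" : "async", timeout_ms);
}

int main(void)
{
	/* flushed inodes generate async writeback, so wait on the async side */
	congestion_wait_demo(BLK_RW_ASYNC, 100);
	return 0;
}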
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 82f88733b68..bbc94ae4fd7 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/time.h>
 #include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
 #include "fat.h"
 
 /* Characters that are undesirable in an MS-DOS file name */
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 73471b7ecc8..cb6e8355711 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -19,7 +19,6 @@
 #include <linux/jiffies.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/namei.h>
 #include "fat.h"
diff --git a/fs/fcntl.c b/fs/fcntl.c
index a040b764f8e..ae413086db9 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -19,7 +19,6 @@
 #include <linux/signal.h>
 #include <linux/rcupdate.h>
 #include <linux/pid_namespace.h>
-#include <linux/smp_lock.h>
 
 #include <asm/poll.h>
 #include <asm/siginfo.h>
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index cdbd1654e4c..1e8af939b3e 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -38,6 +38,7 @@
 #include <linux/buffer_head.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/stat.h>
 #include <linux/vfs.h>
 #include <linux/mount.h>
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8fed2ed12f3..6484eb75acd 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -286,8 +286,8 @@ __releases(&fc->lock)
 	}
 	if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
 	    fc->connected && fc->bdi_initialized) {
-		clear_bdi_congested(&fc->bdi, READ);
-		clear_bdi_congested(&fc->bdi, WRITE);
+		clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
+		clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
 	}
 	fc->num_background--;
 	fc->active_background--;
@@ -414,8 +414,8 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
 		fc->blocked = 1;
 	if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
 	    fc->bdi_initialized) {
-		set_bdi_congested(&fc->bdi, READ);
-		set_bdi_congested(&fc->bdi, WRITE);
+		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
+		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
 	}
 	list_add_tail(&req->list, &fc->bg_queue);
 	flush_bg_queue(fc);
@@ -849,6 +849,81 @@ err:
 	return err;
 }
 
+static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
+				   struct fuse_copy_state *cs)
+{
+	struct fuse_notify_inval_inode_out outarg;
+	int err = -EINVAL;
+
+	if (size != sizeof(outarg))
+		goto err;
+
+	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+	if (err)
+		goto err;
+	fuse_copy_finish(cs);
+
+	down_read(&fc->killsb);
+	err = -ENOENT;
+	if (!fc->sb)
+		goto err_unlock;
+
+	err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
+				       outarg.off, outarg.len);
+
+err_unlock:
+	up_read(&fc->killsb);
+	return err;
+
+err:
+	fuse_copy_finish(cs);
+	return err;
+}
+
+static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+				   struct fuse_copy_state *cs)
+{
+	struct fuse_notify_inval_entry_out outarg;
+	int err = -EINVAL;
+	char buf[FUSE_NAME_MAX+1];
+	struct qstr name;
+
+	if (size < sizeof(outarg))
+		goto err;
+
+	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+	if (err)
+		goto err;
+
+	err = -ENAMETOOLONG;
+	if (outarg.namelen > FUSE_NAME_MAX)
+		goto err;
+
+	name.name = buf;
+	name.len = outarg.namelen;
+	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
+	if (err)
+		goto err;
+	fuse_copy_finish(cs);
+	buf[outarg.namelen] = 0;
+	name.hash = full_name_hash(name.name, name.len);
+
+	down_read(&fc->killsb);
+	err = -ENOENT;
+	if (!fc->sb)
+		goto err_unlock;
+
+	err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+
+err_unlock:
+	up_read(&fc->killsb);
+	return err;
+
+err:
+	fuse_copy_finish(cs);
+	return err;
+}
+
 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
 		       unsigned int size, struct fuse_copy_state *cs)
 {
@@ -856,6 +931,12 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
 	case FUSE_NOTIFY_POLL:
 		return fuse_notify_poll(fc, size, cs);
 
+	case FUSE_NOTIFY_INVAL_INODE:
+		return fuse_notify_inval_inode(fc, size, cs);
+
+	case FUSE_NOTIFY_INVAL_ENTRY:
+		return fuse_notify_inval_entry(fc, size, cs);
+
 	default:
 		fuse_copy_finish(cs);
 		return -EINVAL;
@@ -910,7 +991,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 			      unsigned long nr_segs, loff_t pos)
 {
 	int err;
-	unsigned nbytes = iov_length(iov, nr_segs);
+	size_t nbytes = iov_length(iov, nr_segs);
 	struct fuse_req *req;
 	struct fuse_out_header oh;
 	struct fuse_copy_state cs;
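The new fuse_notify_inval_inode()/fuse_notify_inval_entry() handlers above give the userspace server a reverse channel: it writes a message into /dev/fuse whose out-header carries unique == 0 and the notify code in the error field, followed by the notification payload. A hedged sketch of how a daemon might assemble a FUSE_NOTIFY_INVAL_ENTRY message (struct layouts abbreviated from linux/fuse.h; verify codes and layouts against your headers before relying on them):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fuse_out_header { uint32_t len; int32_t error; uint64_t unique; };
struct fuse_notify_inval_entry_out { uint64_t parent; uint32_t namelen; uint32_t padding; };
#define FUSE_NOTIFY_INVAL_ENTRY 3	/* from enum fuse_notify_code; check fuse.h */

int main(void)
{
	const char *name = "stale_file";
	char buf[256];
	struct fuse_out_header oh;
	struct fuse_notify_inval_entry_out arg;

	arg.parent = 1;			/* root directory nodeid */
	arg.namelen = (uint32_t)strlen(name);
	arg.padding = 0;
	oh.unique = 0;			/* unique == 0 marks a notification */
	oh.error = FUSE_NOTIFY_INVAL_ENTRY;
	oh.len = sizeof(oh) + sizeof(arg) + arg.namelen + 1;

	memcpy(buf, &oh, sizeof(oh));
	memcpy(buf + sizeof(oh), &arg, sizeof(arg));
	memcpy(buf + sizeof(oh) + sizeof(arg), name, arg.namelen + 1);
	/* a real daemon would write(fuse_fd, buf, oh.len) in one go here */
	printf("notification of %u bytes prepared\n", (unsigned)oh.len);
	return 0;
}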
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b3089a083d3..e703654e7f4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -375,7 +375,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	struct fuse_conn *fc = get_fuse_conn(dir);
 	struct fuse_req *req;
 	struct fuse_req *forget_req;
-	struct fuse_open_in inarg;
+	struct fuse_create_in inarg;
 	struct fuse_open_out outopen;
 	struct fuse_entry_out outentry;
 	struct fuse_file *ff;
@@ -399,15 +399,20 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	if (!ff)
 		goto out_put_request;
 
+	if (!fc->dont_mask)
+		mode &= ~current_umask();
+
 	flags &= ~O_NOCTTY;
 	memset(&inarg, 0, sizeof(inarg));
 	memset(&outentry, 0, sizeof(outentry));
 	inarg.flags = flags;
 	inarg.mode = mode;
+	inarg.umask = current_umask();
 	req->in.h.opcode = FUSE_CREATE;
 	req->in.h.nodeid = get_node_id(dir);
 	req->in.numargs = 2;
-	req->in.args[0].size = sizeof(inarg);
+	req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
+						sizeof(inarg);
 	req->in.args[0].value = &inarg;
 	req->in.args[1].size = entry->d_name.len + 1;
 	req->in.args[1].value = entry->d_name.name;
@@ -546,12 +551,17 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
+	if (!fc->dont_mask)
+		mode &= ~current_umask();
+
 	memset(&inarg, 0, sizeof(inarg));
 	inarg.mode = mode;
 	inarg.rdev = new_encode_dev(rdev);
+	inarg.umask = current_umask();
 	req->in.h.opcode = FUSE_MKNOD;
 	req->in.numargs = 2;
-	req->in.args[0].size = sizeof(inarg);
+	req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
+						sizeof(inarg);
 	req->in.args[0].value = &inarg;
 	req->in.args[1].size = entry->d_name.len + 1;
 	req->in.args[1].value = entry->d_name.name;
@@ -578,8 +588,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
+	if (!fc->dont_mask)
+		mode &= ~current_umask();
+
 	memset(&inarg, 0, sizeof(inarg));
 	inarg.mode = mode;
+	inarg.umask = current_umask();
 	req->in.h.opcode = FUSE_MKDIR;
 	req->in.numargs = 2;
 	req->in.args[0].size = sizeof(inarg);
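The three hunks above implement FUSE_DONT_MASK: if the server negotiated the flag, the kernel forwards the creation mode untouched and lets the server apply the umask itself; otherwise the kernel strips the umask bits as before. Either way the raw umask now travels in the request for protocol minor >= 12. The arithmetic involved, as a runnable toy:

#include <stdio.h>

int main(void)
{
	unsigned int mode = 0777;	/* mode requested by the caller */
	unsigned int umask_bits = 022;	/* what current_umask() would return */
	int dont_mask = 0;		/* server did not set FUSE_DONT_MASK */

	if (!dont_mask)
		mode &= ~umask_bits;	/* 0777 & ~022 == 0755 */
	printf("effective mode: %o\n", mode);
	return 0;
}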
@@ -845,6 +859,43 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
 	return err;
 }
 
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+			     struct qstr *name)
+{
+	int err = -ENOTDIR;
+	struct inode *parent;
+	struct dentry *dir;
+	struct dentry *entry;
+
+	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
+	if (!parent)
+		return -ENOENT;
+
+	mutex_lock(&parent->i_mutex);
+	if (!S_ISDIR(parent->i_mode))
+		goto unlock;
+
+	err = -ENOENT;
+	dir = d_find_alias(parent);
+	if (!dir)
+		goto unlock;
+
+	entry = d_lookup(dir, name);
+	dput(dir);
+	if (!entry)
+		goto unlock;
+
+	fuse_invalidate_attr(parent);
+	fuse_invalidate_entry(entry);
+	dput(entry);
+	err = 0;
+
+ unlock:
+	mutex_unlock(&parent->i_mutex);
+	iput(parent);
+	return err;
+}
+
 /*
  * Calling into a user-controlled filesystem gives the filesystem
  * daemon ptrace-like capabilities over the requester process.  This
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index fce6ce694fd..cbc464043b6 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1922,7 +1922,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
 
 	req = fuse_get_req(fc);
 	if (IS_ERR(req))
-		return PTR_ERR(req);
+		return POLLERR;
 
 	req->in.h.opcode = FUSE_POLL;
 	req->in.h.nodeid = ff->nodeid;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index aaf2f9ff970..52b641fc0fa 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -446,6 +446,9 @@ struct fuse_conn {
 	/** Do multi-page cached writes */
 	unsigned big_writes:1;
 
+	/** Don't apply umask to creation modes */
+	unsigned dont_mask:1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
@@ -481,6 +484,12 @@ struct fuse_conn {
 
 	/** Called on final put */
 	void (*release)(struct fuse_conn *);
+
+	/** Super block for this connection. */
+	struct super_block *sb;
+
+	/** Read/write semaphore to hold when accessing sb. */
+	struct rw_semaphore killsb;
 };
 
 static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -509,6 +518,11 @@ extern const struct file_operations fuse_dev_operations;
 extern const struct dentry_operations fuse_dentry_operations;
 
 /**
+ * Inode to nodeid comparison.
+ */
+int fuse_inode_eq(struct inode *inode, void *_nodeidp);
+
+/**
  * Get a filled in inode
  */
 struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
@@ -708,6 +722,19 @@ void fuse_release_nowrite(struct inode *inode);
 
 u64 fuse_get_attr_version(struct fuse_conn *fc);
 
+/**
+ * File-system tells the kernel to invalidate cache for the given node id.
+ */
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+			     loff_t offset, loff_t len);
+
+/**
+ * File-system tells the kernel to invalidate parent attributes and
+ * the dentry matching parent/name.
+ */
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+			     struct qstr *name);
+
 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
 		 bool isdir);
 ssize_t fuse_direct_io(struct file *file, const char __user *buf,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index d8673ccf90b..f91ccc4a189 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -206,7 +206,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
 		BUG();
 }
 
-static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
+int fuse_inode_eq(struct inode *inode, void *_nodeidp)
 {
 	u64 nodeid = *(u64 *) _nodeidp;
 	if (get_node_id(inode) == nodeid)
@@ -257,6 +257,31 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
 	return inode;
 }
 
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+			     loff_t offset, loff_t len)
+{
+	struct inode *inode;
+	pgoff_t pg_start;
+	pgoff_t pg_end;
+
+	inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
+	if (!inode)
+		return -ENOENT;
+
+	fuse_invalidate_attr(inode);
+	if (offset >= 0) {
+		pg_start = offset >> PAGE_CACHE_SHIFT;
+		if (len <= 0)
+			pg_end = -1;
+		else
+			pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+		invalidate_inode_pages2_range(inode->i_mapping,
+					      pg_start, pg_end);
+	}
+	iput(inode);
+	return 0;
+}
+
 static void fuse_umount_begin(struct super_block *sb)
 {
 	fuse_abort_conn(get_fuse_conn_super(sb));
@@ -480,6 +505,7 @@ void fuse_conn_init(struct fuse_conn *fc)
 	memset(fc, 0, sizeof(*fc));
 	spin_lock_init(&fc->lock);
 	mutex_init(&fc->inst_mutex);
+	init_rwsem(&fc->killsb);
 	atomic_set(&fc->count, 1);
 	init_waitqueue_head(&fc->waitq);
 	init_waitqueue_head(&fc->blocked_waitq);
@@ -725,6 +751,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 		}
 		if (arg->flags & FUSE_BIG_WRITES)
 			fc->big_writes = 1;
+		if (arg->flags & FUSE_DONT_MASK)
+			fc->dont_mask = 1;
 	} else {
 		ra_pages = fc->max_read / PAGE_CACHE_SIZE;
 		fc->no_lock = 1;
@@ -748,7 +776,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
 	arg->minor = FUSE_KERNEL_MINOR_VERSION;
 	arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
 	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
-		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES;
+		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK;
 	req->in.h.opcode = FUSE_INIT;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(*arg);
@@ -860,10 +888,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	fuse_conn_init(fc);
 
 	fc->dev = sb->s_dev;
+	fc->sb = sb;
 	err = fuse_bdi_init(fc, sb);
 	if (err)
 		goto err_put_conn;
 
+	/* Handle umasking inside the fuse code */
+	if (sb->s_flags & MS_POSIXACL)
+		fc->dont_mask = 1;
+	sb->s_flags |= MS_POSIXACL;
+
 	fc->release = fuse_free_conn;
 	fc->flags = d.flags;
 	fc->user_id = d.user_id;
@@ -941,12 +975,25 @@ static int fuse_get_sb(struct file_system_type *fs_type,
 	return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
 }
 
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+	struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+	if (fc) {
+		down_write(&fc->killsb);
+		fc->sb = NULL;
+		up_write(&fc->killsb);
+	}
+
+	kill_anon_super(sb);
+}
+
 static struct file_system_type fuse_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "fuse",
 	.fs_flags	= FS_HAS_SUBTYPE,
 	.get_sb		= fuse_get_sb,
-	.kill_sb	= kill_anon_super,
+	.kill_sb	= fuse_kill_sb_anon,
 };
 
 #ifdef CONFIG_BLOCK
@@ -958,11 +1005,24 @@ static int fuse_get_sb_blk(struct file_system_type *fs_type,
 			   mnt);
 }
 
+static void fuse_kill_sb_blk(struct super_block *sb)
+{
+	struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+	if (fc) {
+		down_write(&fc->killsb);
+		fc->sb = NULL;
+		up_write(&fc->killsb);
+	}
+
+	kill_block_super(sb);
+}
+
 static struct file_system_type fuseblk_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "fuseblk",
 	.get_sb		= fuse_get_sb_blk,
-	.kill_sb	= kill_block_super,
+	.kill_sb	= fuse_kill_sb_blk,
 	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
 };
 
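The kill_sb wrappers above close a race: the notify handlers reach inodes through fc->sb, so teardown must clear that pointer under fc->killsb (held exclusive) before the superblock goes away, while handlers hold the semaphore shared and bail out on NULL. A userspace analogy of the same pattern with a pthread rwlock (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t killsb = PTHREAD_RWLOCK_INITIALIZER;
static struct superblock { const char *name; } sb_storage = { "fuse-sb" };
static struct superblock *sb = &sb_storage;

static int notify_handler(void)
{
	int err = -2;			/* -ENOENT stand-in */

	pthread_rwlock_rdlock(&killsb);	/* shared: many handlers may run */
	if (sb) {
		printf("invalidate on %s\n", sb->name);
		err = 0;
	}
	pthread_rwlock_unlock(&killsb);
	return err;
}

static void kill_sb(void)
{
	pthread_rwlock_wrlock(&killsb);	/* exclusive: no handler in flight */
	sb = NULL;			/* handlers can no longer see it */
	pthread_rwlock_unlock(&killsb);
	/* ...only then actually free the superblock */
}

int main(void)
{
	printf("before kill: %d\n", notify_handler());
	kill_sb();
	printf("after kill:  %d\n", notify_handler());
	return 0;
}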
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 03ebb439ace..7ebae9a4ecc 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -624,6 +624,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 {
 	struct gfs2_inode *ip = GFS2_I(mapping->host);
 	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
+	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 	int alloc_required;
 	int error = 0;
@@ -637,6 +638,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	error = gfs2_glock_nq(&ip->i_gh);
 	if (unlikely(error))
 		goto out_uninit;
+	if (&ip->i_inode == sdp->sd_rindex) {
+		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
+					   GL_NOCACHE, &m_ip->i_gh);
+		if (unlikely(error)) {
+			gfs2_glock_dq(&ip->i_gh);
+			goto out_uninit;
+		}
+	}
 
 	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
 	if (error)
@@ -667,6 +676,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	rblocks += data_blocks ? data_blocks : 1;
 	if (ind_blocks || data_blocks)
 		rblocks += RES_STATFS + RES_QUOTA;
+	if (&ip->i_inode == sdp->sd_rindex)
+		rblocks += 2 * RES_STATFS;
 
 	error = gfs2_trans_begin(sdp, rblocks,
 				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -712,6 +723,10 @@ out_alloc_put:
 		gfs2_alloc_put(ip);
 	}
 out_unlock:
+	if (&ip->i_inode == sdp->sd_rindex) {
+		gfs2_glock_dq(&m_ip->i_gh);
+		gfs2_holder_uninit(&m_ip->i_gh);
+	}
 	gfs2_glock_dq(&ip->i_gh);
 out_uninit:
 	gfs2_holder_uninit(&ip->i_gh);
@@ -725,14 +740,21 @@ out_uninit:
 static void adjust_fs_space(struct inode *inode)
 {
 	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+	struct buffer_head *m_bh, *l_bh;
 	u64 fs_total, new_free;
 
 	/* Total up the file system space, according to the latest rindex. */
 	fs_total = gfs2_ri_total(sdp);
+	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
+		return;
 
 	spin_lock(&sdp->sd_statfs_spin);
+	gfs2_statfs_change_in(m_sc, m_bh->b_data +
+			      sizeof(struct gfs2_dinode));
 	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
 		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
 	else
@@ -741,6 +763,13 @@ static void adjust_fs_space(struct inode *inode)
 	fs_warn(sdp, "File system extended by %llu blocks.\n",
 		(unsigned long long)new_free);
 	gfs2_statfs_change(sdp, new_free, new_free, 0);
+
+	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
+		goto out;
+	update_statfs(sdp, m_bh, l_bh);
+	brelse(l_bh);
+out:
+	brelse(m_bh);
 }
 
 /**
@@ -763,6 +792,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	u64 to = pos + copied;
 	void *kaddr;
 	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
@@ -794,6 +824,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
 
 	brelse(dibh);
 	gfs2_trans_end(sdp);
+	if (inode == sdp->sd_rindex) {
+		gfs2_glock_dq(&m_ip->i_gh);
+		gfs2_holder_uninit(&m_ip->i_gh);
+	}
 	gfs2_glock_dq(&ip->i_gh);
 	gfs2_holder_uninit(&ip->i_gh);
 	return copied;
@@ -823,6 +857,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
 	struct inode *inode = page->mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	struct buffer_head *dibh;
 	struct gfs2_alloc *al = ip->i_alloc;
 	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
@@ -865,6 +900,10 @@ failed:
 		gfs2_quota_unlock(ip);
 		gfs2_alloc_put(ip);
 	}
+	if (inode == sdp->sd_rindex) {
+		gfs2_glock_dq(&m_ip->i_gh);
+		gfs2_holder_uninit(&m_ip->i_gh);
+	}
 	gfs2_glock_dq(&ip->i_gh);
 	gfs2_holder_uninit(&ip->i_gh);
 	return ret;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 297421c0427..8b674b1f3a5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_delete_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(lru_lock);
@@ -167,13 +168,33 @@ static void glock_free(struct gfs2_glock *gl)
  *
  */
 
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
 {
 	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
 	atomic_inc(&gl->gl_ref);
 }
 
 /**
+ * demote_ok - Check to see if it's ok to unlock a glock
+ * @gl: the glock
+ *
+ * Returns: 1 if it's ok
+ */
+
+static int demote_ok(const struct gfs2_glock *gl)
+{
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+
+	if (gl->gl_state == LM_ST_UNLOCKED)
+		return 0;
+	if (!list_empty(&gl->gl_holders))
+		return 0;
+	if (glops->go_demote_ok)
+		return glops->go_demote_ok(gl);
+	return 1;
+}
+
+/**
  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
  *
@@ -181,8 +202,13 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
 
 static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 {
+	int may_reclaim;
+	may_reclaim = (demote_ok(gl) &&
+		       (atomic_read(&gl->gl_ref) == 1 ||
+			(gl->gl_name.ln_type == LM_TYPE_INODE &&
+			 atomic_read(&gl->gl_ref) <= 2)));
 	spin_lock(&lru_lock);
-	if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
+	if (list_empty(&gl->gl_lru) && may_reclaim) {
 		list_add_tail(&gl->gl_lru, &lru_list);
 		atomic_inc(&lru_count);
 	}
@@ -190,6 +216,21 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 }
 
 /**
+ * gfs2_glock_put_nolock() - Decrement reference count on glock
+ * @gl: The glock to put
+ *
+ * This function should only be used if the caller has its own reference
+ * to the glock, in addition to the one it is dropping.
+ */
+
+void gfs2_glock_put_nolock(struct gfs2_glock *gl)
+{
+	if (atomic_dec_and_test(&gl->gl_ref))
+		GLOCK_BUG_ON(gl, 1);
+	gfs2_glock_schedule_for_reclaim(gl);
+}
+
+/**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
  *
@@ -214,9 +255,9 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 		rv = 1;
 		goto out;
 	}
-	/* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
-	if (atomic_read(&gl->gl_ref) == 2)
-		gfs2_glock_schedule_for_reclaim(gl);
+	spin_lock(&gl->gl_spin);
+	gfs2_glock_schedule_for_reclaim(gl);
+	spin_unlock(&gl->gl_spin);
 	write_unlock(gl_lock_addr(gl->gl_hash));
 out:
 	return rv;
@@ -398,7 +439,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 		if (held2)
 			gfs2_glock_hold(gl);
 		else
-			gfs2_glock_put(gl);
+			gfs2_glock_put_nolock(gl);
 	}
 
 	gl->gl_state = new_state;
@@ -633,12 +674,35 @@ out:
 out_sched:
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gfs2_glock_put(gl);
+		gfs2_glock_put_nolock(gl);
 out_unlock:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	goto out;
 }
 
+static void delete_work_func(struct work_struct *work)
+{
+	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_inode *ip = NULL;
+	struct inode *inode;
+	u64 no_addr = 0;
+
+	spin_lock(&gl->gl_spin);
+	ip = (struct gfs2_inode *)gl->gl_object;
+	if (ip)
+		no_addr = ip->i_no_addr;
+	spin_unlock(&gl->gl_spin);
+	if (ip) {
+		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+		if (inode) {
+			d_prune_aliases(inode);
+			iput(inode);
+		}
+	}
+	gfs2_glock_put(gl);
+}
+
 static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
@@ -717,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_sbd = sdp;
 	gl->gl_aspace = NULL;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+	INIT_WORK(&gl->gl_delete, delete_work_func);
 
 	/* If this glock protects actual on-disk data or metadata blocks,
 	   create a VFS inode to manage the pages/buffers holding them. */
@@ -858,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 	    gl->gl_demote_state != state) {
 		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
+	if (gl->gl_ops->go_callback)
+		gl->gl_ops->go_callback(gl);
 	trace_gfs2_demote_rq(gl);
 }
 
@@ -1274,33 +1341,12 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 	gfs2_glock_put(gl);
 }
 
-/**
- * demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int demote_ok(const struct gfs2_glock *gl)
-{
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-
-	if (gl->gl_state == LM_ST_UNLOCKED)
-		return 0;
-	if (!list_empty(&gl->gl_holders))
-		return 0;
-	if (glops->go_demote_ok)
-		return glops->go_demote_ok(gl);
-	return 1;
-}
-
 
 static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 {
 	struct gfs2_glock *gl;
 	int may_demote;
 	int nr_skipped = 0;
-	int got_ref = 0;
 	LIST_HEAD(skipped);
 
 	if (nr == 0)
@@ -1315,37 +1361,29 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 
+		/* Check if glock is about to be freed */
+		if (atomic_read(&gl->gl_ref) == 0)
+			continue;
+
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			gfs2_glock_hold(gl);
-			got_ref = 1;
 			spin_unlock(&lru_lock);
 			spin_lock(&gl->gl_spin);
 			may_demote = demote_ok(gl);
-			spin_unlock(&gl->gl_spin);
-			clear_bit(GLF_LOCK, &gl->gl_flags);
 			if (may_demote) {
 				handle_callback(gl, LM_ST_UNLOCKED, 0);
 				nr--;
-				if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-					gfs2_glock_put(gl);
-				got_ref = 0;
 			}
+			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+				gfs2_glock_put_nolock(gl);
+			spin_unlock(&gl->gl_spin);
+			clear_bit(GLF_LOCK, &gl->gl_flags);
 			spin_lock(&lru_lock);
-			if (may_demote)
-				continue;
-		}
-		if (list_empty(&gl->gl_lru) &&
-		    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
-			nr_skipped++;
-			list_add(&gl->gl_lru, &skipped);
-		}
-		if (got_ref) {
-			spin_unlock(&lru_lock);
-			gfs2_glock_put(gl);
-			spin_lock(&lru_lock);
-			got_ref = 0;
+			continue;
 		}
+		nr_skipped++;
+		list_add(&gl->gl_lru, &skipped);
 	}
 	list_splice(&skipped, &lru_list);
 	atomic_add(nr_skipped, &lru_count);
@@ -1727,6 +1765,11 @@ int __init gfs2_glock_init(void)
 	glock_workqueue = create_workqueue("glock_workqueue");
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
+	gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+	if (IS_ERR(gfs2_delete_workqueue)) {
+		destroy_workqueue(glock_workqueue);
+		return PTR_ERR(gfs2_delete_workqueue);
+	}
 
 	register_shrinker(&glock_shrinker);
 
@@ -1737,6 +1780,7 @@ void gfs2_glock_exit(void)
 {
 	unregister_shrinker(&glock_shrinker);
 	destroy_workqueue(glock_workqueue);
+	destroy_workqueue(gfs2_delete_workqueue);
 }
 
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index a602a28f6f0..c609894ec0d 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -143,6 +143,7 @@ struct lm_lockops {
 
 #define GLR_TRYFAILED		13
 
+extern struct workqueue_struct *gfs2_delete_workqueue;
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
@@ -191,6 +192,8 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
 int gfs2_glock_get(struct gfs2_sbd *sdp,
 		   u64 number, const struct gfs2_glock_operations *glops,
 		   int create, struct gfs2_glock **glp);
+void gfs2_glock_hold(struct gfs2_glock *gl);
+void gfs2_glock_put_nolock(struct gfs2_glock *gl);
 int gfs2_glock_put(struct gfs2_glock *gl);
 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 		      struct gfs2_holder *gh);
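gfs2_glock_put_nolock(), exported here, is for callers that already hold gl_spin and still own another reference to the glock, so reaching zero inside it is a bug rather than a free; plain gfs2_glock_put() remains the path that may release the final reference. A compact sketch of the two-flavour put using C11 atomics (the object and all names are stand-ins):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int ref; };

static void obj_put_nolock(struct obj *o)
{
	/* caller guarantees this cannot be the final reference */
	int old = atomic_fetch_sub(&o->ref, 1);
	assert(old > 1);
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		printf("final put: free the object here\n");
}

int main(void)
{
	struct obj o = { 2 };

	obj_put_nolock(&o);	/* drops to 1, never frees */
	obj_put(&o);		/* final reference, frees */
	return 0;
}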
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index d5e4ab155ca..6985eef06c3 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -323,6 +323,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
 
 	if (gl->gl_state != LM_ST_UNLOCKED &&
 	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+		flush_workqueue(gfs2_delete_workqueue);
 		gfs2_meta_syncfs(sdp);
 		gfs2_log_shutdown(sdp);
 	}
@@ -372,6 +373,25 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
 	return 0;
 }
 
+/**
+ * iopen_go_callback - schedule the dcache entry for the inode to be deleted
+ * @gl: the glock
+ *
+ * gl_spin lock is held while calling this
+ */
+static void iopen_go_callback(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+
+	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
+	    gl->gl_state == LM_ST_SHARED &&
+	    ip && test_bit(GIF_USER, &ip->i_flags)) {
+		gfs2_glock_hold(gl);
+		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+			gfs2_glock_put_nolock(gl);
+	}
+}
+
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_type = LM_TYPE_META,
 };
@@ -406,6 +426,7 @@ const struct gfs2_glock_operations gfs2_trans_glops = {
406 426
407const struct gfs2_glock_operations gfs2_iopen_glops = { 427const struct gfs2_glock_operations gfs2_iopen_glops = {
408 .go_type = LM_TYPE_IOPEN, 428 .go_type = LM_TYPE_IOPEN,
429 .go_callback = iopen_go_callback,
409}; 430};
410 431
411const struct gfs2_glock_operations gfs2_flock_glops = { 432const struct gfs2_glock_operations gfs2_flock_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 225347fbff3..61801ada36f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -159,6 +159,7 @@ struct gfs2_glock_operations {
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
 	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
+	void (*go_callback) (struct gfs2_glock *gl);
 	const int go_type;
 	const unsigned long go_min_hold_time;
 };
@@ -228,6 +229,7 @@ struct gfs2_glock {
 	struct list_head gl_ail_list;
 	atomic_t gl_ail_count;
 	struct delayed_work gl_work;
+	struct work_struct gl_delete;
 };
 
 #define GFS2_MIN_LVB_SIZE	32	/* Min size of LVB that gfs2 supports */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index daa4ae341a2..fba795798d3 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -285,27 +285,19 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
 	}
 
 	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
-	if (count[1] + count[2] != tmp) {
+	if (count[1] != tmp) {
 		if (gfs2_consist_rgrpd(rgd))
 			fs_err(sdp, "used data mismatch:  %u != %u\n",
 			       count[1], tmp);
 		return;
 	}
 
-	if (count[3] != rgd->rd_dinodes) {
+	if (count[2] + count[3] != rgd->rd_dinodes) {
 		if (gfs2_consist_rgrpd(rgd))
 			fs_err(sdp, "used metadata mismatch:  %u != %u\n",
-			       count[3], rgd->rd_dinodes);
+			       count[2] + count[3], rgd->rd_dinodes);
 		return;
 	}
-
-	if (count[2] > count[3]) {
-		if (gfs2_consist_rgrpd(rgd))
-			fs_err(sdp, "unlinked inodes > inodes:  %u\n",
-			       count[2]);
-		return;
-	}
-
 }
 
 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
@@ -961,7 +953,8 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
  * Returns: The inode, if one has been found
  */
 
-static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
+static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
+				     u64 skip)
 {
 	struct inode *inode;
 	u32 goal = 0, block;
@@ -985,6 +978,8 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
 			goal++;
 		if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
 			continue;
+		if (no_addr == skip)
+			continue;
 		*last_unlinked = no_addr;
 		inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
 					  no_addr, -1, 1);
@@ -1104,7 +1099,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 		if (try_rgrp_fit(rgd, al))
 			goto out;
 		if (rgd->rd_flags & GFS2_RDF_CHECK)
-			inode = try_rgrp_unlink(rgd, last_unlinked);
+			inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
 		if (!rg_locked)
 			gfs2_glock_dq_uninit(&al->al_rgd_gh);
 		if (inode)
@@ -1138,7 +1133,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 		if (try_rgrp_fit(rgd, al))
 			goto out;
 		if (rgd->rd_flags & GFS2_RDF_CHECK)
-			inode = try_rgrp_unlink(rgd, last_unlinked);
+			inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
 		if (!rg_locked)
 			gfs2_glock_dq_uninit(&al->al_rgd_gh);
 		if (inode)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0a680133647..f522bb01797 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -353,7 +353,7 @@ fail:
 	return error;
 }
 
-static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
 {
 	const struct gfs2_statfs_change *str = buf;
 
@@ -441,6 +441,29 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
 	brelse(l_bh);
 }
 
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+		   struct buffer_head *l_bh)
+{
+	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
+	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+
+	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
+
+	spin_lock(&sdp->sd_statfs_spin);
+	m_sc->sc_total += l_sc->sc_total;
+	m_sc->sc_free += l_sc->sc_free;
+	m_sc->sc_dinodes += l_sc->sc_dinodes;
+	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
+	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
+	       0, sizeof(struct gfs2_statfs_change));
+	spin_unlock(&sdp->sd_statfs_spin);
+
+	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
+	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+}
+
 int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 {
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -477,19 +500,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 	if (error)
 		goto out_bh2;
 
-	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
-
-	spin_lock(&sdp->sd_statfs_spin);
-	m_sc->sc_total += l_sc->sc_total;
-	m_sc->sc_free += l_sc->sc_free;
-	m_sc->sc_dinodes += l_sc->sc_dinodes;
-	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
-	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
-	       0, sizeof(struct gfs2_statfs_change));
-	spin_unlock(&sdp->sd_statfs_spin);
-
-	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
-	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+	update_statfs(sdp, m_bh, l_bh);
 
 	gfs2_trans_end(sdp);
 
@@ -680,6 +691,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 	struct gfs2_holder t_gh;
 	int error;
 
+	flush_workqueue(gfs2_delete_workqueue);
 	gfs2_quota_sync(sdp);
 	gfs2_statfs_sync(sdp);
 
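update_statfs(), factored out above, folds the per-node statfs deltas into the master record and zeroes the local copy under the same lock, so adjust_fs_space() in aops.c can reuse the logic when the filesystem grows. The core of that fold, as a self-contained toy:

#include <stdio.h>

struct statfs_change { long total, free, dinodes; };

static void fold_local_into_master(struct statfs_change *m, struct statfs_change *l)
{
	/* in the kernel this runs under sd_statfs_spin */
	m->total   += l->total;
	m->free    += l->free;
	m->dinodes += l->dinodes;
	l->total = l->free = l->dinodes = 0;	/* local deltas consumed */
}

int main(void)
{
	struct statfs_change master = { 1000, 400, 50 };
	struct statfs_change local  = { 16, -16, 1 };

	fold_local_into_master(&master, &local);
	printf("total=%ld free=%ld dinodes=%ld\n",
	       master.total, master.free, master.dinodes);
	return 0;
}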
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index b56413e3e40..22e0417ed99 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -40,6 +40,10 @@ extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
 extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
 extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
 			       s64 dinodes);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+				  const void *buf);
+extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+			  struct buffer_head *l_bh);
 extern int gfs2_statfs_sync(struct gfs2_sbd *sdp);
 
 extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 98d6ef1c1dc..148d55c1417 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -1,12 +1,11 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gfs2
+
 #if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_GFS2_H
 
 #include <linux/tracepoint.h>
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM gfs2
-#define TRACE_INCLUDE_FILE trace_gfs2
-
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
 #include <linux/dlmconstants.h>
@@ -403,5 +402,6 @@ TRACE_EVENT(gfs2_block_alloc,
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_gfs2
 #include <trace/define_trace.h>
 
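This reshuffle matches the layout the event-tracing infrastructure expects: TRACE_SYSTEM is defined before the multi-read include guard, and TRACE_INCLUDE_FILE sits with define_trace.h outside the guard. A hedged sketch of the conventional skeleton (see samples/trace_events in the kernel tree for the canonical version):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>

/* TRACE_EVENT() definitions go here */

#endif /* _TRACE_GFS2_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gfs2
#include <trace/define_trace.h>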
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 6f833dc8e91..f7fcbe49da7 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -19,6 +19,7 @@
 #include <linux/nls.h>
 #include <linux/parser.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/vfs.h>
 
 #include "hfs_fs.h"
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 9fc3af0c0da..c0759fe0855 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/vfs.h>
 #include <linux/nls.h>
 
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fe02ad4740e..032604e5ef2 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -972,6 +972,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = HOSTFS_SUPER_MAGIC;
 	sb->s_op = &hostfs_sbops;
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
 
 	/* NULL is printed as <NULL> by sprintf: avoid that. */
 	if (req_root == NULL)
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 6916c41d701..8865c94f55f 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -6,6 +6,7 @@
6 * directory VFS functions 6 * directory VFS functions
7 */ 7 */
8 8
9#include <linux/smp_lock.h>
9#include "hpfs_fn.h" 10#include "hpfs_fn.h"
10 11
11static int hpfs_dir_release(struct inode *inode, struct file *filp) 12static int hpfs_dir_release(struct inode *inode, struct file *filp)
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 64ab5225920..3efabff0036 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -6,6 +6,7 @@
6 * file VFS functions 6 * file VFS functions
7 */ 7 */
8 8
9#include <linux/smp_lock.h>
9#include "hpfs_fn.h" 10#include "hpfs_fn.h"
10 11
11#define BLOCKS(size) (((size) + 511) >> 9) 12#define BLOCKS(size) (((size) + 511) >> 9)
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index c2ea31bae31..701ca54c086 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -13,7 +13,6 @@
13#include <linux/pagemap.h> 13#include <linux/pagemap.h>
14#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/smp_lock.h>
17 16
18#include "hpfs.h" 17#include "hpfs.h"
19 18
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 39a1bfbea31..fe703ae46bc 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -6,6 +6,7 @@
6 * inode VFS functions 6 * inode VFS functions
7 */ 7 */
8 8
9#include <linux/smp_lock.h>
9#include "hpfs_fn.h" 10#include "hpfs_fn.h"
10 11
11void hpfs_init_inode(struct inode *i) 12void hpfs_init_inode(struct inode *i)
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index b649232dde9..82b9c4ba9ed 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -6,6 +6,7 @@
6 * adding & removing files & directories 6 * adding & removing files & directories
7 */ 7 */
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/smp_lock.h>
9#include "hpfs_fn.h" 10#include "hpfs_fn.h"
10 11
11static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 12static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
diff --git a/fs/inode.c b/fs/inode.c
index 901bad1e5f1..ae7b67e4866 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -120,12 +120,11 @@ static void wake_up_inode(struct inode *inode)
120 * These are initializations that need to be done on every inode 120 * These are initializations that need to be done on every inode
121 * allocation as the fields are not initialised by slab allocation. 121 * allocation as the fields are not initialised by slab allocation.
122 */ 122 */
123struct inode *inode_init_always(struct super_block *sb, struct inode *inode) 123int inode_init_always(struct super_block *sb, struct inode *inode)
124{ 124{
125 static const struct address_space_operations empty_aops; 125 static const struct address_space_operations empty_aops;
126 static struct inode_operations empty_iops; 126 static struct inode_operations empty_iops;
127 static const struct file_operations empty_fops; 127 static const struct file_operations empty_fops;
128
129 struct address_space *const mapping = &inode->i_data; 128 struct address_space *const mapping = &inode->i_data;
130 129
131 inode->i_sb = sb; 130 inode->i_sb = sb;
@@ -152,7 +151,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
152 inode->dirtied_when = 0; 151 inode->dirtied_when = 0;
153 152
154 if (security_inode_alloc(inode)) 153 if (security_inode_alloc(inode))
155 goto out_free_inode; 154 goto out;
156 155
157 /* allocate and initialize an i_integrity */ 156 /* allocate and initialize an i_integrity */
158 if (ima_inode_alloc(inode)) 157 if (ima_inode_alloc(inode))
@@ -198,16 +197,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
198 inode->i_fsnotify_mask = 0; 197 inode->i_fsnotify_mask = 0;
199#endif 198#endif
200 199
201 return inode; 200 return 0;
202 201
203out_free_security: 202out_free_security:
204 security_inode_free(inode); 203 security_inode_free(inode);
205out_free_inode: 204out:
206 if (inode->i_sb->s_op->destroy_inode) 205 return -ENOMEM;
207 inode->i_sb->s_op->destroy_inode(inode);
208 else
209 kmem_cache_free(inode_cachep, (inode));
210 return NULL;
211} 206}
212EXPORT_SYMBOL(inode_init_always); 207EXPORT_SYMBOL(inode_init_always);
213 208
@@ -220,12 +215,21 @@ static struct inode *alloc_inode(struct super_block *sb)
220 else 215 else
221 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); 216 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
222 217
223 if (inode) 218 if (!inode)
224 return inode_init_always(sb, inode); 219 return NULL;
225 return NULL; 220
221 if (unlikely(inode_init_always(sb, inode))) {
222 if (inode->i_sb->s_op->destroy_inode)
223 inode->i_sb->s_op->destroy_inode(inode);
224 else
225 kmem_cache_free(inode_cachep, inode);
226 return NULL;
227 }
228
229 return inode;
226} 230}
227 231
228void destroy_inode(struct inode *inode) 232void __destroy_inode(struct inode *inode)
229{ 233{
230 BUG_ON(inode_has_buffers(inode)); 234 BUG_ON(inode_has_buffers(inode));
231 ima_inode_free(inode); 235 ima_inode_free(inode);
@@ -237,13 +241,17 @@ void destroy_inode(struct inode *inode)
237 if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) 241 if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
238 posix_acl_release(inode->i_default_acl); 242 posix_acl_release(inode->i_default_acl);
239#endif 243#endif
244}
245EXPORT_SYMBOL(__destroy_inode);
246
247void destroy_inode(struct inode *inode)
248{
249 __destroy_inode(inode);
240 if (inode->i_sb->s_op->destroy_inode) 250 if (inode->i_sb->s_op->destroy_inode)
241 inode->i_sb->s_op->destroy_inode(inode); 251 inode->i_sb->s_op->destroy_inode(inode);
242 else 252 else
243 kmem_cache_free(inode_cachep, (inode)); 253 kmem_cache_free(inode_cachep, (inode));
244} 254}
245EXPORT_SYMBOL(destroy_inode);
246
247 255
248/* 256/*
249 * These are initializations that only need to be done 257 * These are initializations that only need to be done
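
The inode.c hunks flip the ownership contract: inode_init_always() now only reports failure (0 or -ENOMEM) instead of destroying the half-built inode itself, while alloc_inode() and the new __destroy_inode()/destroy_inode() split own teardown. A minimal userspace model of that contract (hypothetical names, not kernel code):

    #include <stdlib.h>

    struct obj { int ready; };

    /* Initializer reports failure but never frees what it did not allocate. */
    static int obj_init(struct obj *o)
    {
        o->ready = 1;
        return 0;               /* imagine -ENOMEM from a nested allocation */
    }

    static struct obj *obj_alloc(void)
    {
        struct obj *o = malloc(sizeof(*o));

        if (!o)
            return NULL;
        if (obj_init(o)) {      /* the allocator owns cleanup on failure */
            free(o);
            return NULL;
        }
        return o;
    }

    int main(void)
    {
        free(obj_alloc());
        return 0;
    }

Keeping allocation and teardown on the same side of the call also lets callers that allocate the inode themselves reuse the initializer safely.
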
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 58a7963e168..85f96bc651c 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -142,6 +142,7 @@ static const struct dentry_operations isofs_dentry_ops[] = {
142 142
143struct iso9660_options{ 143struct iso9660_options{
144 unsigned int rock:1; 144 unsigned int rock:1;
145 unsigned int joliet:1;
145 unsigned int cruft:1; 146 unsigned int cruft:1;
146 unsigned int hide:1; 147 unsigned int hide:1;
147 unsigned int showassoc:1; 148 unsigned int showassoc:1;
@@ -151,7 +152,6 @@ struct iso9660_options{
151 unsigned int gid_set:1; 152 unsigned int gid_set:1;
152 unsigned int utf8:1; 153 unsigned int utf8:1;
153 unsigned char map; 154 unsigned char map;
154 char joliet;
155 unsigned char check; 155 unsigned char check;
156 unsigned int blocksize; 156 unsigned int blocksize;
157 mode_t fmode; 157 mode_t fmode;
@@ -632,7 +632,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
632 else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) { 632 else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) {
633 sec = (struct iso_supplementary_descriptor *)vdp; 633 sec = (struct iso_supplementary_descriptor *)vdp;
634 if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) { 634 if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) {
635 if (opt.joliet == 'y') { 635 if (opt.joliet) {
636 if (sec->escape[2] == 0x40) 636 if (sec->escape[2] == 0x40)
637 joliet_level = 1; 637 joliet_level = 1;
638 else if (sec->escape[2] == 0x43) 638 else if (sec->escape[2] == 0x43)
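
The iso9660_options change above replaces a char that stored the literal characters 'y' or 'n' with a one-bit flag, so the test collapses to a plain truth check. The pattern in miniature (hypothetical struct):

    #include <stdio.h>

    struct mount_opts {
        unsigned int rock:1;
        unsigned int joliet:1;  /* was: char joliet holding 'y'/'n' */
    };

    int main(void)
    {
        struct mount_opts opt = { .rock = 1, .joliet = 1 };

        if (opt.joliet)         /* replaces: if (opt.joliet == 'y') */
            printf("joliet enabled\n");
        return 0;
    }
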
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 737f7246a4b..f96f85092d1 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -287,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
287 struct page *new_page; 287 struct page *new_page;
288 unsigned int new_offset; 288 unsigned int new_offset;
289 struct buffer_head *bh_in = jh2bh(jh_in); 289 struct buffer_head *bh_in = jh2bh(jh_in);
290 journal_t *journal = transaction->t_journal;
290 291
291 /* 292 /*
292 * The buffer really shouldn't be locked: only the current committing 293 * The buffer really shouldn't be locked: only the current committing
@@ -300,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction,
300 J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in)); 301 J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
301 302
302 new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL); 303 new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
304 /* keep subsequent assertions sane */
305 new_bh->b_state = 0;
306 init_buffer(new_bh, NULL, NULL);
307 atomic_set(&new_bh->b_count, 1);
308 new_jh = journal_add_journal_head(new_bh); /* This sleeps */
303 309
304 /* 310 /*
305 * If a new transaction has already done a buffer copy-out, then 311 * If a new transaction has already done a buffer copy-out, then
@@ -361,14 +367,6 @@ repeat:
361 kunmap_atomic(mapped_data, KM_USER0); 367 kunmap_atomic(mapped_data, KM_USER0);
362 } 368 }
363 369
364 /* keep subsequent assertions sane */
365 new_bh->b_state = 0;
366 init_buffer(new_bh, NULL, NULL);
367 atomic_set(&new_bh->b_count, 1);
368 jbd_unlock_bh_state(bh_in);
369
370 new_jh = journal_add_journal_head(new_bh); /* This sleeps */
371
372 set_bh_page(new_bh, new_page, new_offset); 370 set_bh_page(new_bh, new_page, new_offset);
373 new_jh->b_transaction = NULL; 371 new_jh->b_transaction = NULL;
374 new_bh->b_size = jh2bh(jh_in)->b_size; 372 new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -385,7 +383,11 @@ repeat:
385 * copying is moved to the transaction's shadow queue. 383 * copying is moved to the transaction's shadow queue.
386 */ 384 */
387 JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); 385 JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
388 journal_file_buffer(jh_in, transaction, BJ_Shadow); 386 spin_lock(&journal->j_list_lock);
387 __journal_file_buffer(jh_in, transaction, BJ_Shadow);
388 spin_unlock(&journal->j_list_lock);
389 jbd_unlock_bh_state(bh_in);
390
389 JBUFFER_TRACE(new_jh, "file as BJ_IO"); 391 JBUFFER_TRACE(new_jh, "file as BJ_IO");
390 journal_file_buffer(new_jh, transaction, BJ_IO); 392 journal_file_buffer(new_jh, transaction, BJ_IO);
391 393
@@ -848,6 +850,12 @@ static int journal_reset(journal_t *journal)
848 850
849 first = be32_to_cpu(sb->s_first); 851 first = be32_to_cpu(sb->s_first);
850 last = be32_to_cpu(sb->s_maxlen); 852 last = be32_to_cpu(sb->s_maxlen);
853 if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
854 printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n",
855 first, last);
856 journal_fail_superblock(journal);
857 return -EINVAL;
858 }
851 859
852 journal->j_first = first; 860 journal->j_first = first;
853 journal->j_last = last; 861 journal->j_last = last;
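
Two things move in the journal_write_metadata_buffer() hunks: the sleeping setup (alloc_buffer_head() and journal_add_journal_head()) is hoisted above the bh_state-locked section, and that lock is now held until the original buffer is filed as BJ_Shadow under j_list_lock, closing the window in which journal_get_write_access() could observe the buffer between copy-out and filing. The general shape, sketched with pthreads instead of kernel primitives (hypothetical names):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *shadow;

    static int publish_copy(size_t len)
    {
        void *copy = malloc(len);       /* may block: done before locking */

        if (!copy)
            return -1;
        pthread_mutex_lock(&state_lock);
        shadow = copy;                  /* state change and publication happen
                                           as one step under the lock */
        pthread_mutex_unlock(&state_lock);
        return 0;
    }

    int main(void)
    {
        return publish_copy(64) ? 1 : 0;
    }
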
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 73242ba7c7b..c03ac11f74b 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -489,34 +489,15 @@ void journal_unlock_updates (journal_t *journal)
489 wake_up(&journal->j_wait_transaction_locked); 489 wake_up(&journal->j_wait_transaction_locked);
490} 490}
491 491
492/* 492static void warn_dirty_buffer(struct buffer_head *bh)
493 * Report any unexpected dirty buffers which turn up. Normally those
494 * indicate an error, but they can occur if the user is running (say)
495 * tune2fs to modify the live filesystem, so we need the option of
496 * continuing as gracefully as possible.
497 *
498 * The caller should already hold the journal lock and
499 * j_list_lock spinlock: most callers will need those anyway
500 * in order to probe the buffer's journaling state safely.
501 */
502static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
503{ 493{
504 int jlist; 494 char b[BDEVNAME_SIZE];
505
506 /* If this buffer is one which might reasonably be dirty
507 * --- ie. data, or not part of this journal --- then
508 * we're OK to leave it alone, but otherwise we need to
509 * move the dirty bit to the journal's own internal
510 * JBDDirty bit. */
511 jlist = jh->b_jlist;
512 495
513 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 496 printk(KERN_WARNING
514 jlist == BJ_Shadow || jlist == BJ_Forget) { 497 "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
515 struct buffer_head *bh = jh2bh(jh); 498 "There's a risk of filesystem corruption in case of system "
516 499 "crash.\n",
517 if (test_clear_buffer_dirty(bh)) 500 bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
518 set_buffer_jbddirty(bh);
519 }
520} 501}
521 502
522/* 503/*
@@ -583,14 +564,16 @@ repeat:
583 if (jh->b_next_transaction) 564 if (jh->b_next_transaction)
584 J_ASSERT_JH(jh, jh->b_next_transaction == 565 J_ASSERT_JH(jh, jh->b_next_transaction ==
585 transaction); 566 transaction);
567 warn_dirty_buffer(bh);
586 } 568 }
587 /* 569 /*
588 * In any case we need to clean the dirty flag and we must 570 * In any case we need to clean the dirty flag and we must
589 * do it under the buffer lock to be sure we don't race 571 * do it under the buffer lock to be sure we don't race
590 * with running write-out. 572 * with running write-out.
591 */ 573 */
592 JBUFFER_TRACE(jh, "Unexpected dirty buffer"); 574 JBUFFER_TRACE(jh, "Journalling dirty buffer");
593 jbd_unexpected_dirty_buffer(jh); 575 clear_buffer_dirty(bh);
576 set_buffer_jbddirty(bh);
594 } 577 }
595 578
596 unlock_buffer(bh); 579 unlock_buffer(bh);
@@ -826,6 +809,15 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
826 J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); 809 J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
827 810
828 if (jh->b_transaction == NULL) { 811 if (jh->b_transaction == NULL) {
812 /*
813 * Previous journal_forget() could have left the buffer
814 * with jbddirty bit set because it was being committed. When
815 * the commit finished, we've filed the buffer for
816 * checkpointing and marked it dirty. Now we are reallocating
817 * the buffer so the transaction freeing it must have
818 * committed and so it's safe to clear the dirty bit.
819 */
820 clear_buffer_dirty(jh2bh(jh));
829 jh->b_transaction = transaction; 821 jh->b_transaction = transaction;
830 822
831 /* first access by this transaction */ 823 /* first access by this transaction */
@@ -1782,8 +1774,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1782 1774
1783 if (jh->b_cp_transaction) { 1775 if (jh->b_cp_transaction) {
1784 JBUFFER_TRACE(jh, "on running+cp transaction"); 1776 JBUFFER_TRACE(jh, "on running+cp transaction");
1777 /*
1778 * We don't want to write the buffer anymore, clear the
1779 * bit so that we don't confuse checks in
1780 * __journal_file_buffer
1781 */
1782 clear_buffer_dirty(bh);
1785 __journal_file_buffer(jh, transaction, BJ_Forget); 1783 __journal_file_buffer(jh, transaction, BJ_Forget);
1786 clear_buffer_jbddirty(bh);
1787 may_free = 0; 1784 may_free = 0;
1788 } else { 1785 } else {
1789 JBUFFER_TRACE(jh, "on running transaction"); 1786 JBUFFER_TRACE(jh, "on running transaction");
@@ -2041,12 +2038,17 @@ void __journal_file_buffer(struct journal_head *jh,
2041 if (jh->b_transaction && jh->b_jlist == jlist) 2038 if (jh->b_transaction && jh->b_jlist == jlist)
2042 return; 2039 return;
2043 2040
2044 /* The following list of buffer states needs to be consistent
2045 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
2046 * state. */
2047
2048 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 2041 if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
2049 jlist == BJ_Shadow || jlist == BJ_Forget) { 2042 jlist == BJ_Shadow || jlist == BJ_Forget) {
2043 /*
2044 * For metadata buffers, we track dirty bit in buffer_jbddirty
2045 * instead of buffer_dirty. We should not see a dirty bit set
2046 * here because we clear it in do_get_write_access but e.g.
2047 * tune2fs can modify the sb and set the dirty bit at any time
2048 * so we try to gracefully handle that.
2049 */
2050 if (buffer_dirty(bh))
2051 warn_dirty_buffer(bh);
2050 if (test_clear_buffer_dirty(bh) || 2052 if (test_clear_buffer_dirty(bh) ||
2051 test_clear_buffer_jbddirty(bh)) 2053 test_clear_buffer_jbddirty(bh))
2052 was_dirty = 1; 2054 was_dirty = 1;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 18bfd5dab64..e378cb38397 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -297,6 +297,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
297 unsigned int new_offset; 297 unsigned int new_offset;
298 struct buffer_head *bh_in = jh2bh(jh_in); 298 struct buffer_head *bh_in = jh2bh(jh_in);
299 struct jbd2_buffer_trigger_type *triggers; 299 struct jbd2_buffer_trigger_type *triggers;
300 journal_t *journal = transaction->t_journal;
300 301
301 /* 302 /*
302 * The buffer really shouldn't be locked: only the current committing 303 * The buffer really shouldn't be locked: only the current committing
@@ -310,6 +311,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
310 J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in)); 311 J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
311 312
312 new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL); 313 new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
314 /* keep subsequent assertions sane */
315 new_bh->b_state = 0;
316 init_buffer(new_bh, NULL, NULL);
317 atomic_set(&new_bh->b_count, 1);
318 new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
313 319
314 /* 320 /*
315 * If a new transaction has already done a buffer copy-out, then 321 * If a new transaction has already done a buffer copy-out, then
@@ -388,14 +394,6 @@ repeat:
388 kunmap_atomic(mapped_data, KM_USER0); 394 kunmap_atomic(mapped_data, KM_USER0);
389 } 395 }
390 396
391 /* keep subsequent assertions sane */
392 new_bh->b_state = 0;
393 init_buffer(new_bh, NULL, NULL);
394 atomic_set(&new_bh->b_count, 1);
395 jbd_unlock_bh_state(bh_in);
396
397 new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
398
399 set_bh_page(new_bh, new_page, new_offset); 397 set_bh_page(new_bh, new_page, new_offset);
400 new_jh->b_transaction = NULL; 398 new_jh->b_transaction = NULL;
401 new_bh->b_size = jh2bh(jh_in)->b_size; 399 new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -412,7 +410,11 @@ repeat:
412 * copying is moved to the transaction's shadow queue. 410 * copying is moved to the transaction's shadow queue.
413 */ 411 */
414 JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); 412 JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
415 jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow); 413 spin_lock(&journal->j_list_lock);
414 __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
415 spin_unlock(&journal->j_list_lock);
416 jbd_unlock_bh_state(bh_in);
417
416 JBUFFER_TRACE(new_jh, "file as BJ_IO"); 418 JBUFFER_TRACE(new_jh, "file as BJ_IO");
417 jbd2_journal_file_buffer(new_jh, transaction, BJ_IO); 419 jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
418 420
@@ -2410,6 +2412,7 @@ const char *jbd2_dev_to_name(dev_t device)
2410 int i = hash_32(device, CACHE_SIZE_BITS); 2412 int i = hash_32(device, CACHE_SIZE_BITS);
2411 char *ret; 2413 char *ret;
2412 struct block_device *bd; 2414 struct block_device *bd;
2415 static struct devname_cache *new_dev;
2413 2416
2414 rcu_read_lock(); 2417 rcu_read_lock();
2415 if (devcache[i] && devcache[i]->device == device) { 2418 if (devcache[i] && devcache[i]->device == device) {
@@ -2419,20 +2422,20 @@ const char *jbd2_dev_to_name(dev_t device)
2419 } 2422 }
2420 rcu_read_unlock(); 2423 rcu_read_unlock();
2421 2424
2425 new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
2426 if (!new_dev)
2427 return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
2422 spin_lock(&devname_cache_lock); 2428 spin_lock(&devname_cache_lock);
2423 if (devcache[i]) { 2429 if (devcache[i]) {
2424 if (devcache[i]->device == device) { 2430 if (devcache[i]->device == device) {
2431 kfree(new_dev);
2425 ret = devcache[i]->devname; 2432 ret = devcache[i]->devname;
2426 spin_unlock(&devname_cache_lock); 2433 spin_unlock(&devname_cache_lock);
2427 return ret; 2434 return ret;
2428 } 2435 }
2429 call_rcu(&devcache[i]->rcu, free_devcache); 2436 call_rcu(&devcache[i]->rcu, free_devcache);
2430 } 2437 }
2431 devcache[i] = kmalloc(sizeof(struct devname_cache), GFP_KERNEL); 2438 devcache[i] = new_dev;
2432 if (!devcache[i]) {
2433 spin_unlock(&devname_cache_lock);
2434 return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
2435 }
2436 devcache[i]->device = device; 2439 devcache[i]->device = device;
2437 bd = bdget(device); 2440 bd = bdget(device);
2438 if (bd) { 2441 if (bd) {
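
The jbd2_dev_to_name() rework exists because kmalloc(GFP_KERNEL) may sleep and so must not run under devname_cache_lock, a spinlock: the entry is now allocated speculatively before the lock is taken and simply freed again if another CPU filled the slot in the meantime. The same optimistic-allocation shape in userspace terms (hypothetical names):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static char *cache;

    static char *get_slot(void)
    {
        char *mine = malloc(64);        /* blocking call kept outside the lock */

        if (!mine)
            return NULL;
        pthread_mutex_lock(&cache_lock);
        if (cache)
            free(mine);                 /* lost the race: reuse theirs */
        else
            cache = mine;
        mine = cache;
        pthread_mutex_unlock(&cache_lock);
        return mine;
    }

    int main(void)
    {
        return get_slot() ? 0 : 1;
    }
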
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 494501edba6..6213ac728f3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -499,34 +499,15 @@ void jbd2_journal_unlock_updates (journal_t *journal)
499 wake_up(&journal->j_wait_transaction_locked); 499 wake_up(&journal->j_wait_transaction_locked);
500} 500}
501 501
502/* 502static void warn_dirty_buffer(struct buffer_head *bh)
503 * Report any unexpected dirty buffers which turn up. Normally those
504 * indicate an error, but they can occur if the user is running (say)
505 * tune2fs to modify the live filesystem, so we need the option of
506 * continuing as gracefully as possible.
507 *
508 * The caller should already hold the journal lock and
509 * j_list_lock spinlock: most callers will need those anyway
510 * in order to probe the buffer's journaling state safely.
511 */
512static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
513{ 503{
514 int jlist; 504 char b[BDEVNAME_SIZE];
515
516 /* If this buffer is one which might reasonably be dirty
517 * --- ie. data, or not part of this journal --- then
518 * we're OK to leave it alone, but otherwise we need to
519 * move the dirty bit to the journal's own internal
520 * JBDDirty bit. */
521 jlist = jh->b_jlist;
522 505
523 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 506 printk(KERN_WARNING
524 jlist == BJ_Shadow || jlist == BJ_Forget) { 507 "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
525 struct buffer_head *bh = jh2bh(jh); 508 "There's a risk of filesystem corruption in case of system "
526 509 "crash.\n",
527 if (test_clear_buffer_dirty(bh)) 510 bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
528 set_buffer_jbddirty(bh);
529 }
530} 511}
531 512
532/* 513/*
@@ -593,14 +574,16 @@ repeat:
593 if (jh->b_next_transaction) 574 if (jh->b_next_transaction)
594 J_ASSERT_JH(jh, jh->b_next_transaction == 575 J_ASSERT_JH(jh, jh->b_next_transaction ==
595 transaction); 576 transaction);
577 warn_dirty_buffer(bh);
596 } 578 }
597 /* 579 /*
598 * In any case we need to clean the dirty flag and we must 580 * In any case we need to clean the dirty flag and we must
599 * do it under the buffer lock to be sure we don't race 581 * do it under the buffer lock to be sure we don't race
600 * with running write-out. 582 * with running write-out.
601 */ 583 */
602 JBUFFER_TRACE(jh, "Unexpected dirty buffer"); 584 JBUFFER_TRACE(jh, "Journalling dirty buffer");
603 jbd_unexpected_dirty_buffer(jh); 585 clear_buffer_dirty(bh);
586 set_buffer_jbddirty(bh);
604 } 587 }
605 588
606 unlock_buffer(bh); 589 unlock_buffer(bh);
@@ -843,6 +826,15 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
843 J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); 826 J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
844 827
845 if (jh->b_transaction == NULL) { 828 if (jh->b_transaction == NULL) {
829 /*
830 * Previous jbd2_journal_forget() could have left the buffer
831 * with jbddirty bit set because it was being committed. When
832 * the commit finished, we've filed the buffer for
833 * checkpointing and marked it dirty. Now we are reallocating
834 * the buffer so the transaction freeing it must have
835 * committed and so it's safe to clear the dirty bit.
836 */
837 clear_buffer_dirty(jh2bh(jh));
846 jh->b_transaction = transaction; 838 jh->b_transaction = transaction;
847 839
848 /* first access by this transaction */ 840 /* first access by this transaction */
@@ -1644,8 +1636,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1644 1636
1645 if (jh->b_cp_transaction) { 1637 if (jh->b_cp_transaction) {
1646 JBUFFER_TRACE(jh, "on running+cp transaction"); 1638 JBUFFER_TRACE(jh, "on running+cp transaction");
1639 /*
1640 * We don't want to write the buffer anymore, clear the
1641 * bit so that we don't confuse checks in
1642 * __journal_file_buffer
1643 */
1644 clear_buffer_dirty(bh);
1647 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1645 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1648 clear_buffer_jbddirty(bh);
1649 may_free = 0; 1646 may_free = 0;
1650 } else { 1647 } else {
1651 JBUFFER_TRACE(jh, "on running transaction"); 1648 JBUFFER_TRACE(jh, "on running transaction");
@@ -1896,12 +1893,17 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
1896 if (jh->b_transaction && jh->b_jlist == jlist) 1893 if (jh->b_transaction && jh->b_jlist == jlist)
1897 return; 1894 return;
1898 1895
1899 /* The following list of buffer states needs to be consistent
1900 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
1901 * state. */
1902
1903 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 1896 if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
1904 jlist == BJ_Shadow || jlist == BJ_Forget) { 1897 jlist == BJ_Shadow || jlist == BJ_Forget) {
1898 /*
1899 * For metadata buffers, we track dirty bit in buffer_jbddirty
1900 * instead of buffer_dirty. We should not see a dirty bit set
1901 * here because we clear it in do_get_write_access but e.g.
1902 * tune2fs can modify the sb and set the dirty bit at any time
1903 * so we try to gracefully handle that.
1904 */
1905 if (buffer_dirty(bh))
1906 warn_dirty_buffer(bh);
1905 if (test_clear_buffer_dirty(bh) || 1907 if (test_clear_buffer_dirty(bh) ||
1906 test_clear_buffer_jbddirty(bh)) 1908 test_clear_buffer_jbddirty(bh))
1907 was_dirty = 1; 1909 was_dirty = 1;
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index a0244740b75..b47679be118 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -270,19 +270,21 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
270 D2({ 270 D2({
271 int i=0; 271 int i=0;
272 struct jffs2_raw_node_ref *this; 272 struct jffs2_raw_node_ref *this;
273 printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); 273 printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n");
274 274
275 this = ic->nodes; 275 this = ic->nodes;
276 276
277 printk(KERN_DEBUG);
277 while(this) { 278 while(this) {
278 printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); 279 printk(KERN_CONT "0x%08x(%d)->",
280 ref_offset(this), ref_flags(this));
279 if (++i == 5) { 281 if (++i == 5) {
280 printk("\n" KERN_DEBUG); 282 printk(KERN_DEBUG);
281 i=0; 283 i=0;
282 } 284 }
283 this = this->next_in_ino; 285 this = this->next_in_ino;
284 } 286 }
285 printk("\n"); 287 printk(KERN_CONT "\n");
286 }); 288 });
287 289
288 switch (ic->class) { 290 switch (ic->class) {
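
The erase.c hunk enforces the printk convention that each line gets exactly one level marker: a level string such as KERN_DEBUG embedded mid-line is printed literally rather than interpreted, so continuations must use KERN_CONT. Condensed to the idiom (kernel-context sketch, not a standalone module):

    #include <linux/kernel.h>

    static void dump_list(const int *v, int n)
    {
        int i;

        printk(KERN_DEBUG "list:");             /* level marker starts the line */
        for (i = 0; i < n; i++)
            printk(KERN_CONT " %d", v[i]);      /* continuation: no marker */
        printk(KERN_CONT "\n");
    }
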
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 5edc2bf2058..23c94753986 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -99,7 +99,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
99 kunmap(pg); 99 kunmap(pg);
100 100
101 D2(printk(KERN_DEBUG "readpage finished\n")); 101 D2(printk(KERN_DEBUG "readpage finished\n"));
102 return 0; 102 return ret;
103} 103}
104 104
105int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg) 105int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 7515e73e2bf..696686cc206 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -130,9 +130,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
130 if (jffs2_sum_active()) { 130 if (jffs2_sum_active()) {
131 s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); 131 s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
132 if (!s) { 132 if (!s) {
133 kfree(flashbuf);
134 JFFS2_WARNING("Can't allocate memory for summary\n"); 133 JFFS2_WARNING("Can't allocate memory for summary\n");
135 return -ENOMEM; 134 ret = -ENOMEM;
135 goto out;
136 } 136 }
137 } 137 }
138 138
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 07a22caf268..0035c021395 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -12,6 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/smp_lock.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/fs.h> 18#include <linux/fs.h>
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 91fa3ad6e8c..a29c7c3e3fb 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -67,10 +67,8 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
67 acl = posix_acl_from_xattr(value, size); 67 acl = posix_acl_from_xattr(value, size);
68 } 68 }
69 kfree(value); 69 kfree(value);
70 if (!IS_ERR(acl)) { 70 if (!IS_ERR(acl))
71 set_cached_acl(inode, type, acl); 71 set_cached_acl(inode, type, acl);
72 posix_acl_release(acl);
73 }
74 return acl; 72 return acl;
75} 73}
76 74
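
The jfs_get_acl() fix is a reference-ownership correction: set_cached_acl() takes its own reference to the ACL, and the function's callers expect to receive one as well, so releasing it after caching handed the caller a pointer it did not own. A toy refcount model of the corrected contract (hypothetical names):

    #include <stdlib.h>

    struct acl { int refcount; };
    static struct acl *cached;

    static void acl_get(struct acl *a) { a->refcount++; }
    static void acl_put(struct acl *a) { if (--a->refcount == 0) free(a); }

    /* Returns a reference the caller must drop; the cache holds its own. */
    static struct acl *get_acl(void)
    {
        struct acl *a = malloc(sizeof(*a));

        if (!a)
            return NULL;
        a->refcount = 1;        /* the caller's reference */
        acl_get(a);             /* the cache's reference */
        cached = a;
        return a;               /* no acl_put() here */
    }

    int main(void)
    {
        struct acl *a = get_acl();

        if (a)
            acl_put(a);
        return 0;
    }
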
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index f2fdcbce143..4336adba952 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/smp_lock.h>
10#include <linux/types.h> 11#include <linux/types.h>
11#include <linux/errno.h> 12#include <linux/errno.h>
12#include <linux/fs.h> 13#include <linux/fs.h>
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 1725037374c..bd173a6ca3b 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -10,6 +10,7 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/smp_lock.h>
13#include <linux/in.h> 14#include <linux/in.h>
14#include <linux/sunrpc/svc.h> 15#include <linux/sunrpc/svc.h>
15#include <linux/sunrpc/clnt.h> 16#include <linux/sunrpc/clnt.h>
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 3688e55901f..e1d28ddd216 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -10,6 +10,7 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/smp_lock.h>
13#include <linux/in.h> 14#include <linux/in.h>
14#include <linux/sunrpc/svc.h> 15#include <linux/sunrpc/svc.h>
15#include <linux/sunrpc/clnt.h> 16#include <linux/sunrpc/clnt.h>
diff --git a/fs/namei.c b/fs/namei.c
index 5b961eb71cb..f3c5b278895 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1761,6 +1761,10 @@ do_last:
1761 goto exit; 1761 goto exit;
1762 } 1762 }
1763 filp = nameidata_to_filp(&nd, open_flag); 1763 filp = nameidata_to_filp(&nd, open_flag);
1764 if (IS_ERR(filp))
1765 ima_counts_put(&nd.path,
1766 acc_mode & (MAY_READ | MAY_WRITE |
1767 MAY_EXEC));
1764 mnt_drop_write(nd.path.mnt); 1768 mnt_drop_write(nd.path.mnt);
1765 if (nd.root.mnt) 1769 if (nd.root.mnt)
1766 path_put(&nd.root); 1770 path_put(&nd.root);
@@ -1817,6 +1821,9 @@ ok:
1817 goto exit; 1821 goto exit;
1818 } 1822 }
1819 filp = nameidata_to_filp(&nd, open_flag); 1823 filp = nameidata_to_filp(&nd, open_flag);
1824 if (IS_ERR(filp))
1825 ima_counts_put(&nd.path,
1826 acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
1820 /* 1827 /*
1821 * It is now safe to drop the mnt write 1828 * It is now safe to drop the mnt write
1822 * because the filp has had a write taken 1829 * because the filp has had a write taken
diff --git a/fs/namespace.c b/fs/namespace.c
index 3dc283fd471..7230787d18b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -22,6 +22,7 @@
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/mnt_namespace.h> 23#include <linux/mnt_namespace.h>
24#include <linux/namei.h> 24#include <linux/namei.h>
25#include <linux/nsproxy.h>
25#include <linux/security.h> 26#include <linux/security.h>
26#include <linux/mount.h> 27#include <linux/mount.h>
27#include <linux/ramfs.h> 28#include <linux/ramfs.h>
@@ -315,7 +316,8 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
315 */ 316 */
316int mnt_want_write_file(struct file *file) 317int mnt_want_write_file(struct file *file)
317{ 318{
318 if (!(file->f_mode & FMODE_WRITE)) 319 struct inode *inode = file->f_dentry->d_inode;
320 if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
319 return mnt_want_write(file->f_path.mnt); 321 return mnt_want_write(file->f_path.mnt);
320 else 322 else
321 return mnt_clone_write(file->f_path.mnt); 323 return mnt_clone_write(file->f_path.mnt);
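
The mnt_want_write_file() fix accounts for special files: opening a device node, FIFO or socket for write takes no write reference on the mount (the data goes to a driver, not to the filesystem backing the dentry), so there is nothing for mnt_clone_write() to clone and the slow-path mnt_want_write() must be used instead. The predicate itself, rendered in userspace terms:

    #include <sys/stat.h>

    /* Userspace equivalent of the kernel's special_file() test. */
    static int is_special(mode_t m)
    {
        return S_ISCHR(m) || S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
    }

    int main(void)
    {
        return !is_special(S_IFIFO);    /* exits 0: a FIFO is special */
    }
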
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c2d061675d8..8d25ccb2d51 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1242,20 +1242,6 @@ error:
1242 return error; 1242 return error;
1243} 1243}
1244 1244
1245/*
1246 * Initialize a session.
1247 * Note: save the mount rsize and wsize for create_server negotiation.
1248 */
1249static void nfs4_init_session(struct nfs_client *clp,
1250 unsigned int wsize, unsigned int rsize)
1251{
1252#if defined(CONFIG_NFS_V4_1)
1253 if (nfs4_has_session(clp)) {
1254 clp->cl_session->fc_attrs.max_rqst_sz = wsize;
1255 clp->cl_session->fc_attrs.max_resp_sz = rsize;
1256 }
1257#endif /* CONFIG_NFS_V4_1 */
1258}
1259 1245
1260/* 1246/*
1261 * Session has been established, and the client marked ready. 1247 * Session has been established, and the client marked ready.
@@ -1350,7 +1336,9 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
1350 BUG_ON(!server->nfs_client->rpc_ops); 1336 BUG_ON(!server->nfs_client->rpc_ops);
1351 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); 1337 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
1352 1338
1353 nfs4_init_session(server->nfs_client, server->wsize, server->rsize); 1339 error = nfs4_init_session(server);
1340 if (error < 0)
1341 goto error;
1354 1342
1355 /* Probe the root fh to retrieve its FSID */ 1343 /* Probe the root fh to retrieve its FSID */
1356 error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path); 1344 error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index af05b918cb5..6dd48a4405b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -10,6 +10,7 @@
10#include <linux/kthread.h> 10#include <linux/kthread.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/smp_lock.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14 15
15#include <linux/nfs4.h> 16#include <linux/nfs4.h>
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 89f98e9a024..32062c33c85 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -29,7 +29,6 @@
29#include <linux/nfs_fs.h> 29#include <linux/nfs_fs.h>
30#include <linux/nfs_mount.h> 30#include <linux/nfs_mount.h>
31#include <linux/pagemap.h> 31#include <linux/pagemap.h>
32#include <linux/smp_lock.h>
33#include <linux/pagevec.h> 32#include <linux/pagevec.h>
34#include <linux/namei.h> 33#include <linux/namei.h>
35#include <linux/mount.h> 34#include <linux/mount.h>
@@ -1026,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1026 res = NULL; 1025 res = NULL;
1027 goto out; 1026 goto out;
1028 /* This turned out not to be a regular file */ 1027 /* This turned out not to be a regular file */
1029 case -EISDIR:
1030 case -ENOTDIR: 1028 case -ENOTDIR:
1031 goto no_open; 1029 goto no_open;
1032 case -ELOOP: 1030 case -ELOOP:
1033 if (!(nd->intent.open.flags & O_NOFOLLOW)) 1031 if (!(nd->intent.open.flags & O_NOFOLLOW))
1034 goto no_open; 1032 goto no_open;
1033 /* case -EISDIR: */
1035 /* case -EINVAL: */ 1034 /* case -EINVAL: */
1036 default: 1035 default:
1037 goto out; 1036 goto out;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 0055b813ec2..05062329b67 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -26,7 +26,6 @@
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/pagemap.h> 28#include <linux/pagemap.h>
29#include <linux/smp_lock.h>
30#include <linux/aio.h> 29#include <linux/aio.h>
31 30
32#include <asm/uaccess.h> 31#include <asm/uaccess.h>
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 46177cb8706..b35d2a61606 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -30,7 +30,6 @@
30#include <linux/nfs_idmap.h> 30#include <linux/nfs_idmap.h>
31#include <linux/vfs.h> 31#include <linux/vfs.h>
32#include <linux/namei.h> 32#include <linux/namei.h>
33#include <linux/mnt_namespace.h>
34#include <linux/security.h> 33#include <linux/security.h>
35 34
36#include <asm/system.h> 35#include <asm/system.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 64f87194d39..bd7938eda6a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -30,7 +30,6 @@
30#include <linux/nfs_mount.h> 30#include <linux/nfs_mount.h>
31#include <linux/nfs4_mount.h> 31#include <linux/nfs4_mount.h>
32#include <linux/lockd/bind.h> 32#include <linux/lockd/bind.h>
33#include <linux/smp_lock.h>
34#include <linux/seq_file.h> 33#include <linux/seq_file.h>
35#include <linux/mount.h> 34#include <linux/mount.h>
36#include <linux/nfs_idmap.h> 35#include <linux/nfs_idmap.h>
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 61bc3a32e1e..6ea07a3c75d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -220,6 +220,7 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
220extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp); 220extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
221extern int nfs4_proc_create_session(struct nfs_client *, int reset); 221extern int nfs4_proc_create_session(struct nfs_client *, int reset);
222extern int nfs4_proc_destroy_session(struct nfs4_session *); 222extern int nfs4_proc_destroy_session(struct nfs4_session *);
223extern int nfs4_init_session(struct nfs_server *server);
223#else /* CONFIG_NFS_v4_1 */ 224#else /* CONFIG_NFS_v4_1 */
224static inline int nfs4_setup_sequence(struct nfs_client *clp, 225static inline int nfs4_setup_sequence(struct nfs_client *clp,
225 struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, 226 struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -227,6 +228,11 @@ static inline int nfs4_setup_sequence(struct nfs_client *clp,
227{ 228{
228 return 0; 229 return 0;
229} 230}
231
232static inline int nfs4_init_session(struct nfs_server *server)
233{
234 return 0;
235}
230#endif /* CONFIG_NFS_V4_1 */ 236#endif /* CONFIG_NFS_V4_1 */
231 237
232extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[]; 238extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 92ce4351781..6917311f201 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -45,7 +45,6 @@
45#include <linux/nfs4.h> 45#include <linux/nfs4.h>
46#include <linux/nfs_fs.h> 46#include <linux/nfs_fs.h>
47#include <linux/nfs_page.h> 47#include <linux/nfs_page.h>
48#include <linux/smp_lock.h>
49#include <linux/namei.h> 48#include <linux/namei.h>
50#include <linux/mount.h> 49#include <linux/mount.h>
51#include <linux/module.h> 50#include <linux/module.h>
@@ -2041,15 +2040,9 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2041 .rpc_argp = &args, 2040 .rpc_argp = &args,
2042 .rpc_resp = &res, 2041 .rpc_resp = &res,
2043 }; 2042 };
2044 int status;
2045 2043
2046 nfs_fattr_init(info->fattr); 2044 nfs_fattr_init(info->fattr);
2047 status = nfs4_recover_expired_lease(server); 2045 return nfs4_call_sync(server, &msg, &args, &res, 0);
2048 if (!status)
2049 status = nfs4_check_client_ready(server->nfs_client);
2050 if (!status)
2051 status = nfs4_call_sync(server, &msg, &args, &res, 0);
2052 return status;
2053} 2046}
2054 2047
2055static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2048static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -4100,15 +4093,23 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4100 if (request->fl_start < 0 || request->fl_end < 0) 4093 if (request->fl_start < 0 || request->fl_end < 0)
4101 return -EINVAL; 4094 return -EINVAL;
4102 4095
4103 if (IS_GETLK(cmd)) 4096 if (IS_GETLK(cmd)) {
4104 return nfs4_proc_getlk(state, F_GETLK, request); 4097 if (state != NULL)
4098 return nfs4_proc_getlk(state, F_GETLK, request);
4099 return 0;
4100 }
4105 4101
4106 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 4102 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4107 return -EINVAL; 4103 return -EINVAL;
4108 4104
4109 if (request->fl_type == F_UNLCK) 4105 if (request->fl_type == F_UNLCK) {
4110 return nfs4_proc_unlck(state, cmd, request); 4106 if (state != NULL)
4107 return nfs4_proc_unlck(state, cmd, request);
4108 return 0;
4109 }
4111 4110
4111 if (state == NULL)
4112 return -ENOLCK;
4112 do { 4113 do {
4113 status = nfs4_proc_setlk(state, cmd, request); 4114 status = nfs4_proc_setlk(state, cmd, request);
4114 if ((status != -EAGAIN) || IS_SETLK(cmd)) 4115 if ((status != -EAGAIN) || IS_SETLK(cmd))
@@ -4794,6 +4795,22 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
4794 return status; 4795 return status;
4795} 4796}
4796 4797
4798int nfs4_init_session(struct nfs_server *server)
4799{
4800 struct nfs_client *clp = server->nfs_client;
4801 int ret;
4802
4803 if (!nfs4_has_session(clp))
4804 return 0;
4805
4806 clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
4807 clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
4808 ret = nfs4_recover_expired_lease(server);
4809 if (!ret)
4810 ret = nfs4_check_client_ready(clp);
4811 return ret;
4812}
4813
4797/* 4814/*
4798 * Renew the cl_session lease. 4815 * Renew the cl_session lease.
4799 */ 4816 */
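
The nfs4_proc_lock() hunk turns a NULL open state from an unhandled case into three explicit ones: a GETLK query or an F_UNLCK request against nonexistent state succeeds trivially, while actually taking a lock without state fails with -ENOLCK. As a toy dispatcher (hypothetical names and types):

    #include <errno.h>

    enum cmd { GETLK, SETLK, UNLCK };
    struct lock_state { int held; };

    static int query(struct lock_state *s)  { return s->held; }
    static int unlock(struct lock_state *s) { s->held = 0; return 0; }
    static int setlk(struct lock_state *s)  { s->held = 1; return 0; }

    static int do_lock(struct lock_state *st, enum cmd c)
    {
        if (c == GETLK)
            return st ? query(st) : 0;  /* no state: nothing is held */
        if (c == UNLCK)
            return st ? unlock(st) : 0; /* no state: nothing to drop */
        if (!st)
            return -ENOLCK;             /* a new lock needs open state */
        return setlk(st);
    }

    int main(void)
    {
        return do_lock(0, UNLCK);       /* exits 0 */
    }
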
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b73c5a72865..65ca8c18476 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -553,6 +553,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
553 INIT_LIST_HEAD(&lsp->ls_sequence.list); 553 INIT_LIST_HEAD(&lsp->ls_sequence.list);
554 lsp->ls_seqid.sequence = &lsp->ls_sequence; 554 lsp->ls_seqid.sequence = &lsp->ls_sequence;
555 atomic_set(&lsp->ls_count, 1); 555 atomic_set(&lsp->ls_count, 1);
556 lsp->ls_state = state;
556 lsp->ls_owner = fl_owner; 557 lsp->ls_owner = fl_owner;
557 spin_lock(&clp->cl_lock); 558 spin_lock(&clp->cl_lock);
558 nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64); 559 nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
@@ -587,7 +588,6 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
587 if (lsp != NULL) 588 if (lsp != NULL)
588 break; 589 break;
589 if (new != NULL) { 590 if (new != NULL) {
590 new->ls_state = state;
591 list_add(&new->ls_locks, &state->lock_states); 591 list_add(&new->ls_locks, &state->lock_states);
592 set_bit(LK_STATE_IN_USE, &state->flags); 592 set_bit(LK_STATE_IN_USE, &state->flags);
593 lsp = new; 593 lsp = new;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 96c4ebfa46f..73ea5e8d66c 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -18,7 +18,6 @@
18#include <linux/sunrpc/clnt.h> 18#include <linux/sunrpc/clnt.h>
19#include <linux/nfs_fs.h> 19#include <linux/nfs_fs.h>
20#include <linux/nfs_page.h> 20#include <linux/nfs_page.h>
21#include <linux/smp_lock.h>
22 21
23#include <asm/system.h> 22#include <asm/system.h>
24 23
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ce728829f79..0a0a2ff767c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -202,8 +202,10 @@ static int nfs_set_page_writeback(struct page *page)
202 struct nfs_server *nfss = NFS_SERVER(inode); 202 struct nfs_server *nfss = NFS_SERVER(inode);
203 203
204 if (atomic_long_inc_return(&nfss->writeback) > 204 if (atomic_long_inc_return(&nfss->writeback) >
205 NFS_CONGESTION_ON_THRESH) 205 NFS_CONGESTION_ON_THRESH) {
206 set_bdi_congested(&nfss->backing_dev_info, WRITE); 206 set_bdi_congested(&nfss->backing_dev_info,
207 BLK_RW_ASYNC);
208 }
207 } 209 }
208 return ret; 210 return ret;
209} 211}
@@ -215,7 +217,7 @@ static void nfs_end_page_writeback(struct page *page)
215 217
216 end_page_writeback(page); 218 end_page_writeback(page);
217 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 219 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
218 clear_bdi_congested(&nfss->backing_dev_info, WRITE); 220 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
219} 221}
220 222
221/* 223/*
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1250fb978ac..6d0847562d8 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -25,7 +25,6 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/inet.h> 26#include <linux/inet.h>
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/smp_lock.h>
29#include <linux/ctype.h> 28#include <linux/ctype.h>
30 29
31#include <linux/nfs.h> 30#include <linux/nfs.h>
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index d4c9884cd54..492c79b7800 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -18,7 +18,6 @@
18#include <linux/unistd.h> 18#include <linux/unistd.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/freezer.h> 21#include <linux/freezer.h>
23#include <linux/fs_struct.h> 22#include <linux/fs_struct.h>
24#include <linux/kthread.h> 23#include <linux/kthread.h>
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 4145083dcf8..23341c1063b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -678,7 +678,6 @@ __be32
678nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, 678nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
679 int access, struct file **filp) 679 int access, struct file **filp)
680{ 680{
681 const struct cred *cred = current_cred();
682 struct dentry *dentry; 681 struct dentry *dentry;
683 struct inode *inode; 682 struct inode *inode;
684 int flags = O_RDONLY|O_LARGEFILE; 683 int flags = O_RDONLY|O_LARGEFILE;
@@ -733,7 +732,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
733 vfs_dq_init(inode); 732 vfs_dq_init(inode);
734 } 733 }
735 *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), 734 *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
736 flags, cred); 735 flags, current_cred());
737 if (IS_ERR(*filp)) 736 if (IS_ERR(*filp))
738 host_err = PTR_ERR(*filp); 737 host_err = PTR_ERR(*filp);
739 else 738 else
diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig
new file mode 100644
index 00000000000..72da095d400
--- /dev/null
+++ b/fs/nilfs2/Kconfig
@@ -0,0 +1,25 @@
1config NILFS2_FS
2 tristate "NILFS2 file system support (EXPERIMENTAL)"
3 depends on BLOCK && EXPERIMENTAL
4 select CRC32
5 help
6 NILFS2 is a log-structured file system (LFS) supporting continuous
7 snapshotting. In addition to versioning the entire file
8 system, users can even restore files mistakenly overwritten or
9 destroyed just a few seconds ago. Since this file system keeps
10 consistency like a conventional LFS, it achieves quick recovery after
11 system crashes.
12
13 NILFS2 creates a number of checkpoints every few seconds or on
14 each synchronous write (unless there is no change). Users can
15 select significant versions among continuously created checkpoints,
16 and can change them into snapshots, which are preserved for long
17 periods until they are changed back to checkpoints. Each
18 snapshot is mountable as a read-only file system concurrently with
19 its writable mount, and this feature is convenient for online backup.
20
21 Some features including atime, extended attributes, and POSIX ACLs,
22 are not supported yet.
23
24 To compile this file system support as a module, choose M here: the
25 module will be called nilfs2. If unsure, say N.
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 36df60b6d8a..99d58a028b9 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -568,6 +568,7 @@ void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap,
568} 568}
569 569
570static struct lock_class_key nilfs_bmap_dat_lock_key; 570static struct lock_class_key nilfs_bmap_dat_lock_key;
571static struct lock_class_key nilfs_bmap_mdt_lock_key;
571 572
572/** 573/**
573 * nilfs_bmap_read - read a bmap from an inode 574 * nilfs_bmap_read - read a bmap from an inode
@@ -603,7 +604,11 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
603 bmap->b_ptr_type = NILFS_BMAP_PTR_VS; 604 bmap->b_ptr_type = NILFS_BMAP_PTR_VS;
604 bmap->b_last_allocated_key = 0; 605 bmap->b_last_allocated_key = 0;
605 bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; 606 bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
607 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
606 break; 608 break;
609 case NILFS_IFILE_INO:
610 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
611 /* Fall through */
607 default: 612 default:
608 bmap->b_ptr_type = NILFS_BMAP_PTR_VM; 613 bmap->b_ptr_type = NILFS_BMAP_PTR_VM;
609 bmap->b_last_allocated_key = 0; 614 bmap->b_last_allocated_key = 0;
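
The bmap.c hunk assigns certain metadata-file bmaps (including the ifile's) a lockdep class of their own, nilfs_bmap_mdt_lock_key, alongside the existing DAT key, so taking one b_sem while holding the other is not misreported as recursive locking. The idiom, reduced to its core (kernel-context sketch; keys must be static because lockdep identifies a class by the key's address):

    #include <linux/rwsem.h>
    #include <linux/lockdep.h>

    static struct lock_class_key dat_key;
    static struct lock_class_key mdt_key;

    static void bmap_lock_init(struct rw_semaphore *sem, int is_dat)
    {
        init_rwsem(sem);
        lockdep_set_class(sem, is_dat ? &dat_key : &mdt_key);
    }
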
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 7d49813f66d..aec942cf79e 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -307,7 +307,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
307 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); 307 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
308 if (ret < 0) { 308 if (ret < 0) {
309 if (ret != -ENOENT) 309 if (ret != -ENOENT)
310 goto out_header; 310 break;
311 /* skip hole */ 311 /* skip hole */
312 ret = 0; 312 ret = 0;
313 continue; 313 continue;
@@ -340,7 +340,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
340 continue; 340 continue;
341 printk(KERN_ERR "%s: cannot delete block\n", 341 printk(KERN_ERR "%s: cannot delete block\n",
342 __func__); 342 __func__);
343 goto out_header; 343 break;
344 } 344 }
345 } 345 }
346 346
@@ -358,7 +358,6 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
358 kunmap_atomic(kaddr, KM_USER0); 358 kunmap_atomic(kaddr, KM_USER0);
359 } 359 }
360 360
361 out_header:
362 brelse(header_bh); 361 brelse(header_bh);
363 362
364 out_sem: 363 out_sem:
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 0b2710e2d56..8927ca27e6f 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -134,15 +134,6 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
134 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 134 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
135 req->pr_entry_bh, kaddr); 135 req->pr_entry_bh, kaddr);
136 entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); 136 entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
137 if (entry->de_blocknr != cpu_to_le64(0) ||
138 entry->de_end != cpu_to_le64(NILFS_CNO_MAX)) {
139 printk(KERN_CRIT
140 "%s: vbn = %llu, start = %llu, end = %llu, pbn = %llu\n",
141 __func__, (unsigned long long)req->pr_entry_nr,
142 (unsigned long long)le64_to_cpu(entry->de_start),
143 (unsigned long long)le64_to_cpu(entry->de_end),
144 (unsigned long long)le64_to_cpu(entry->de_blocknr));
145 }
146 entry->de_blocknr = cpu_to_le64(blocknr); 137 entry->de_blocknr = cpu_to_le64(blocknr);
147 kunmap_atomic(kaddr, KM_USER0); 138 kunmap_atomic(kaddr, KM_USER0);
148 139
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 54100acc110..1a4fa04cf07 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -43,7 +43,6 @@
43 */ 43 */
44 44
45#include <linux/pagemap.h> 45#include <linux/pagemap.h>
46#include <linux/smp_lock.h>
47#include "nilfs.h" 46#include "nilfs.h"
48#include "page.h" 47#include "page.h"
49 48
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 3d3ddb3f517..2dfd47714ae 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -412,8 +412,10 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
412 return 0; /* Do not request flush for shadow page cache */ 412 return 0; /* Do not request flush for shadow page cache */
413 if (!sb) { 413 if (!sb) {
414 writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs); 414 writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs);
415 if (!writer) 415 if (!writer) {
416 nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
416 return -EROFS; 417 return -EROFS;
418 }
417 sb = writer->s_super; 419 sb = writer->s_super;
418 } 420 }
419 421
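
The mdt.c fix balances nilfs_get_writer(): the call takes the writer reference even when it has no writer to hand back, so returning -EROFS without a matching nilfs_put_writer() leaked it. A miniature model of the invariant (hypothetical names):

    static int users;
    static void *writer;

    /* get() always bumps the count, even when it finds nothing, so every
     * call must be paired with put(), including on the failure path. */
    static void *get_writer(void) { users++; return writer; }
    static void put_writer(void)  { users--; }

    static int do_write(void)
    {
        void *w = get_writer();

        if (!w) {
            put_writer();       /* balance before bailing out */
            return -1;
        }
        /* ... use w ... */
        put_writer();
        return 0;
    }

    int main(void)
    {
        return do_write() ? 0 : 1;      /* exits 0: no writer available */
    }
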
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index aa977549919..51ff3d0a4ee 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1829,26 +1829,13 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1829 err = nilfs_segbuf_write(segbuf, &wi); 1829 err = nilfs_segbuf_write(segbuf, &wi);
1830 1830
1831 res = nilfs_segbuf_wait(segbuf, &wi); 1831 res = nilfs_segbuf_wait(segbuf, &wi);
1832 err = unlikely(err) ? : res; 1832 err = err ? : res;
1833 if (unlikely(err)) 1833 if (err)
1834 return err; 1834 return err;
1835 } 1835 }
1836 return 0; 1836 return 0;
1837} 1837}
1838 1838
1839static int nilfs_page_has_uncleared_buffer(struct page *page)
1840{
1841 struct buffer_head *head, *bh;
1842
1843 head = bh = page_buffers(page);
1844 do {
1845 if (buffer_dirty(bh) && !list_empty(&bh->b_assoc_buffers))
1846 return 1;
1847 bh = bh->b_this_page;
1848 } while (bh != head);
1849 return 0;
1850}
1851
1852static void __nilfs_end_page_io(struct page *page, int err) 1839static void __nilfs_end_page_io(struct page *page, int err)
1853{ 1840{
1854 if (!err) { 1841 if (!err) {
@@ -1872,13 +1859,26 @@ static void nilfs_end_page_io(struct page *page, int err)
1872 if (!page) 1859 if (!page)
1873 return; 1860 return;
1874 1861
1875 if (buffer_nilfs_node(page_buffers(page)) && 1862 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1876 nilfs_page_has_uncleared_buffer(page)) 1863 /*
1877 /* For b-tree node pages, this function may be called twice 1864 * For b-tree node pages, this function may be called twice
1878 or more because they might be split in a segment. 1865 * or more because they might be split in a segment.
1879 This check assures that cleanup has been done for all 1866 */
1880 buffers in a split btnode page. */ 1867 if (PageDirty(page)) {
1868 /*
1869 * For pages holding split b-tree node buffers, dirty
1870 * flag on the buffers may be cleared discretely.
1871 * In that case, the page is once redirtied for
1872 * remaining buffers, and it must be cancelled if
1873 * all the buffers get cleaned later.
1874 */
1875 lock_page(page);
1876 if (nilfs_page_buffers_clean(page))
1877 __nilfs_clear_page_dirty(page);
1878 unlock_page(page);
1879 }
1881 return; 1880 return;
1881 }
1882 1882
1883 __nilfs_end_page_io(page, err); 1883 __nilfs_end_page_io(page, err);
1884} 1884}
@@ -1940,7 +1940,7 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
1940 } 1940 }
1941 if (bh->b_page != fs_page) { 1941 if (bh->b_page != fs_page) {
1942 nilfs_end_page_io(fs_page, err); 1942 nilfs_end_page_io(fs_page, err);
1943 if (unlikely(fs_page == failed_page)) 1943 if (fs_page && fs_page == failed_page)
1944 goto done; 1944 goto done;
1945 fs_page = bh->b_page; 1945 fs_page = bh->b_page;
1946 } 1946 }
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 31dac7e3b0f..dffbb0911d0 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,15 +1,5 @@
1config FSNOTIFY 1config FSNOTIFY
2 bool "Filesystem notification backend" 2 def_bool n
3 default y
4 ---help---
5 fsnotify is a backend for filesystem notification. fsnotify does
6 not provide any userspace interface but does provide the basis
7 needed for other notification schemes such as dnotify, inotify,
8 and fanotify.
9
10 Say Y here to enable fsnotify support.
11
12 If unsure, say Y.
13 3
14source "fs/notify/dnotify/Kconfig" 4source "fs/notify/dnotify/Kconfig"
15source "fs/notify/inotify/Kconfig" 5source "fs/notify/inotify/Kconfig"
diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig
index 904ff8d5405..f9c1ca139d8 100644
--- a/fs/notify/dnotify/Kconfig
+++ b/fs/notify/dnotify/Kconfig
@@ -1,6 +1,6 @@
1config DNOTIFY 1config DNOTIFY
2 bool "Dnotify support" 2 bool "Dnotify support"
3 depends on FSNOTIFY 3 select FSNOTIFY
4 default y 4 default y
5 help 5 help
6 Dnotify is a directory-based per-fd file change notification system 6 Dnotify is a directory-based per-fd file change notification system
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ec2f7bd7681..037e878e03f 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -159,7 +159,9 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const
159 if (!group->ops->should_send_event(group, to_tell, mask)) 159 if (!group->ops->should_send_event(group, to_tell, mask))
160 continue; 160 continue;
161 if (!event) { 161 if (!event) {
162 event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie); 162 event = fsnotify_create_event(to_tell, mask, data,
163 data_is, file_name, cookie,
164 GFP_KERNEL);
163 /* shit, we OOM'd and now we can't tell, maybe 165 /* shit, we OOM'd and now we can't tell, maybe
164 * someday someone else will want to do something 166 * someday someone else will want to do something
165 * here */ 167 * here */
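The fsnotify hunk adds a gfp_t parameter so each call site can pick allocation semantics (GFP_KERNEL vs GFP_NOFS) appropriate to its context. A hedged user-space analogue of threading allocation flags through a constructor; the enum and names are invented for illustration:

#include <stdlib.h>
#include <string.h>

/* Stand-ins for GFP_KERNEL / GFP_NOFS: how aggressively the caller's
 * context permits the allocator to reclaim memory. */
enum alloc_flags { ALLOC_MAY_RECLAIM, ALLOC_NO_FS_RECLAIM };

struct event {
	unsigned mask;
	char *name;
};

/* The constructor takes the flags as a parameter instead of
 * hard-coding them, mirroring the new gfp argument above. */
static struct event *event_create(unsigned mask, const char *name,
				  enum alloc_flags flags)
{
	struct event *ev;

	(void)flags; /* a kernel would pass this to kmem_cache_alloc() */
	ev = malloc(sizeof(*ev));
	if (!ev)
		return NULL;
	ev->mask = mask;
	ev->name = NULL;
	if (name) {
		ev->name = malloc(strlen(name) + 1);
		if (!ev->name) {
			free(ev);
			return NULL;
		}
		strcpy(ev->name, name);
	}
	return ev;
}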
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 5356884289a..3e56dbffe72 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -15,7 +15,7 @@ config INOTIFY
15 15
16config INOTIFY_USER 16config INOTIFY_USER
17 bool "Inotify support for userspace" 17 bool "Inotify support for userspace"
18 depends on FSNOTIFY 18 select FSNOTIFY
19 default y 19 default y
20 ---help--- 20 ---help---
21 Say Y here to enable inotify support for userspace, including the 21 Say Y here to enable inotify support for userspace, including the
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ff231ad2389..f30d9bbc2e1 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -57,7 +57,6 @@ int inotify_max_user_watches __read_mostly;
57 57
58static struct kmem_cache *inotify_inode_mark_cachep __read_mostly; 58static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
59struct kmem_cache *event_priv_cachep __read_mostly; 59struct kmem_cache *event_priv_cachep __read_mostly;
60static struct fsnotify_event *inotify_ignored_event;
61 60
62/* 61/*
63 * When inotify registers a new group it increments this and uses that 62 * When inotify registers a new group it increments this and uses that
@@ -296,12 +295,15 @@ static int inotify_fasync(int fd, struct file *file, int on)
296static int inotify_release(struct inode *ignored, struct file *file) 295static int inotify_release(struct inode *ignored, struct file *file)
297{ 296{
298 struct fsnotify_group *group = file->private_data; 297 struct fsnotify_group *group = file->private_data;
298 struct user_struct *user = group->inotify_data.user;
299 299
300 fsnotify_clear_marks_by_group(group); 300 fsnotify_clear_marks_by_group(group);
301 301
302 /* free this group, matching get was inotify_init->fsnotify_obtain_group */ 302 /* free this group, matching get was inotify_init->fsnotify_obtain_group */
303 fsnotify_put_group(group); 303 fsnotify_put_group(group);
304 304
305 atomic_dec(&user->inotify_devs);
306
305 return 0; 307 return 0;
306} 308}
307 309
@@ -362,6 +364,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
362 return error; 364 return error;
363} 365}
364 366
367static void inotify_remove_from_idr(struct fsnotify_group *group,
368 struct inotify_inode_mark_entry *ientry)
369{
370 struct idr *idr;
371
372 spin_lock(&group->inotify_data.idr_lock);
373 idr = &group->inotify_data.idr;
374 idr_remove(idr, ientry->wd);
375 spin_unlock(&group->inotify_data.idr_lock);
376 ientry->wd = -1;
377}
365/* 378/*
366 * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the 379 * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
367 * internal reference held on the mark because it is in the idr. 380
@@ -370,13 +383,19 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
370 struct fsnotify_group *group) 383 struct fsnotify_group *group)
371{ 384{
372 struct inotify_inode_mark_entry *ientry; 385 struct inotify_inode_mark_entry *ientry;
386 struct fsnotify_event *ignored_event;
373 struct inotify_event_private_data *event_priv; 387 struct inotify_event_private_data *event_priv;
374 struct fsnotify_event_private_data *fsn_event_priv; 388 struct fsnotify_event_private_data *fsn_event_priv;
375 struct idr *idr; 389
390 ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
391 FSNOTIFY_EVENT_NONE, NULL, 0,
392 GFP_NOFS);
393 if (!ignored_event)
394 return;
376 395
377 ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); 396 ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
378 397
379 event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL); 398 event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
380 if (unlikely(!event_priv)) 399 if (unlikely(!event_priv))
381 goto skip_send_ignore; 400 goto skip_send_ignore;
382 401
@@ -385,7 +404,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
385 fsn_event_priv->group = group; 404 fsn_event_priv->group = group;
386 event_priv->wd = ientry->wd; 405 event_priv->wd = ientry->wd;
387 406
388 fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv); 407 fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
389 408
390 /* did the private data get added? */ 409 /* did the private data get added? */
391 if (list_empty(&fsn_event_priv->event_list)) 410 if (list_empty(&fsn_event_priv->event_list))
@@ -393,14 +412,16 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
393 412
394skip_send_ignore: 413skip_send_ignore:
395 414
415 /* matches the reference taken when the event was created */
416 fsnotify_put_event(ignored_event);
417
396 /* remove this entry from the idr */ 418 /* remove this entry from the idr */
397 spin_lock(&group->inotify_data.idr_lock); 419 inotify_remove_from_idr(group, ientry);
398 idr = &group->inotify_data.idr;
399 idr_remove(idr, ientry->wd);
400 spin_unlock(&group->inotify_data.idr_lock);
401 420
402 /* removed from idr, drop that reference */ 421 /* removed from idr, drop that reference */
403 fsnotify_put_mark(entry); 422 fsnotify_put_mark(entry);
423
424 atomic_dec(&group->inotify_data.user->inotify_watches);
404} 425}
405 426
406/* ding dong the mark is dead */ 427/* ding dong the mark is dead */
@@ -415,6 +436,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
415{ 436{
416 struct fsnotify_mark_entry *entry = NULL; 437 struct fsnotify_mark_entry *entry = NULL;
417 struct inotify_inode_mark_entry *ientry; 438 struct inotify_inode_mark_entry *ientry;
439 struct inotify_inode_mark_entry *tmp_ientry;
418 int ret = 0; 440 int ret = 0;
419 int add = (arg & IN_MASK_ADD); 441 int add = (arg & IN_MASK_ADD);
420 __u32 mask; 442 __u32 mask;
@@ -425,54 +447,66 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
425 if (unlikely(!mask)) 447 if (unlikely(!mask))
426 return -EINVAL; 448 return -EINVAL;
427 449
428 ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); 450 tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
429 if (unlikely(!ientry)) 451 if (unlikely(!tmp_ientry))
430 return -ENOMEM; 452 return -ENOMEM;
431 /* we set the mask at the end after attaching it */ 453 /* we set the mask at the end after attaching it */
432 fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark); 454 fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
433 ientry->wd = 0; 455 tmp_ientry->wd = -1;
434 456
435find_entry: 457find_entry:
436 spin_lock(&inode->i_lock); 458 spin_lock(&inode->i_lock);
437 entry = fsnotify_find_mark_entry(group, inode); 459 entry = fsnotify_find_mark_entry(group, inode);
438 spin_unlock(&inode->i_lock); 460 spin_unlock(&inode->i_lock);
439 if (entry) { 461 if (entry) {
440 kmem_cache_free(inotify_inode_mark_cachep, ientry);
441 ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); 462 ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
442 } else { 463 } else {
443 if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) { 464 ret = -ENOSPC;
444 ret = -ENOSPC; 465 if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
445 goto out_err; 466 goto out_err;
446 }
447
448 ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
449 if (ret == -EEXIST)
450 goto find_entry;
451 else if (ret)
452 goto out_err;
453
454 entry = &ientry->fsn_entry;
455retry: 467retry:
456 ret = -ENOMEM; 468 ret = -ENOMEM;
457 if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) 469 if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
458 goto out_err; 470 goto out_err;
459 471
460 spin_lock(&group->inotify_data.idr_lock); 472 spin_lock(&group->inotify_data.idr_lock);
461 /* if entry is added to the idr we keep the reference obtained 473 ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
462 * through fsnotify_mark_add. remember to drop this reference 474 group->inotify_data.last_wd,
463 * when entry is removed from idr */ 475 &tmp_ientry->wd);
464 ret = idr_get_new_above(&group->inotify_data.idr, entry,
465 ++group->inotify_data.last_wd,
466 &ientry->wd);
467 spin_unlock(&group->inotify_data.idr_lock); 476 spin_unlock(&group->inotify_data.idr_lock);
468 if (ret) { 477 if (ret) {
469 if (ret == -EAGAIN) 478 if (ret == -EAGAIN)
470 goto retry; 479 goto retry;
471 goto out_err; 480 goto out_err;
472 } 481 }
482
483 ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
484 if (ret) {
485 inotify_remove_from_idr(group, tmp_ientry);
486 if (ret == -EEXIST)
487 goto find_entry;
488 goto out_err;
489 }
490
491 /* tmp_ientry has been added to the inode, so we are all set up.
492 * now we just need to make sure tmp_ientry doesn't get freed and
493 * we need to set up entry and ientry so the generic code can
494 * do its thing. */
495 ientry = tmp_ientry;
496 entry = &ientry->fsn_entry;
497 tmp_ientry = NULL;
498
473 atomic_inc(&group->inotify_data.user->inotify_watches); 499 atomic_inc(&group->inotify_data.user->inotify_watches);
500
501 /* update the idr hint */
502 group->inotify_data.last_wd = ientry->wd;
503
504 /* we put the mark on the idr, take a reference */
505 fsnotify_get_mark(entry);
474 } 506 }
475 507
508 ret = ientry->wd;
509
476 spin_lock(&entry->lock); 510 spin_lock(&entry->lock);
477 511
478 old_mask = entry->mask; 512 old_mask = entry->mask;
@@ -503,14 +537,19 @@ retry:
503 fsnotify_recalc_group_mask(group); 537 fsnotify_recalc_group_mask(group);
504 } 538 }
505 539
506 return ientry->wd; 540 /* this either matches fsnotify_find_mark_entry, or init_mark_entry
541 * depending on which path we took... */
542 fsnotify_put_mark(entry);
507 543
508out_err: 544out_err:
509 /* see this isn't supposed to happen, just kill the watch */ 545 /* could be an error, could be that we found an existing mark */
510 if (entry) { 546 if (tmp_ientry) {
511 fsnotify_destroy_mark_by_entry(entry); 547 /* on the idr but didn't make it on the inode */
512 fsnotify_put_mark(entry); 548 if (tmp_ientry->wd != -1)
549 inotify_remove_from_idr(group, tmp_ientry);
550 kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
513 } 551 }
552
514 return ret; 553 return ret;
515} 554}
516 555
@@ -718,9 +757,6 @@ static int __init inotify_user_setup(void)
718 757
719 inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC); 758 inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
720 event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC); 759 event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
721 inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
722 if (!inotify_ignored_event)
723 panic("unable to allocate the inotify ignored event\n");
724 760
725 inotify_max_queued_events = 16384; 761 inotify_max_queued_events = 16384;
726 inotify_max_user_instances = 128; 762 inotify_max_user_instances = 128;
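The reworked inotify_update_watch() above allocates the watch descriptor in the idr first, then attaches the mark to the inode, and unwinds the idr slot if the attach fails. A simplified model of that "reserve, attach, roll back" ordering, using a flat table in place of an idr; all names are illustrative:

#include <stdio.h>

#define MAX_WD 8

struct mark { int wd; int attached; };
static struct mark *wd_table[MAX_WD];

static int wd_reserve(struct mark *m)
{
	for (int wd = 0; wd < MAX_WD; wd++)
		if (!wd_table[wd]) {
			wd_table[wd] = m;
			m->wd = wd;
			return wd;
		}
	return -1; /* -ENOSPC in the kernel */
}

static void wd_remove(struct mark *m)
{
	wd_table[m->wd] = NULL;
	m->wd = -1; /* mirrors inotify_remove_from_idr() */
}

static int attach_to_inode(struct mark *m, int should_fail)
{
	if (should_fail)
		return -1;
	m->attached = 1;
	return 0;
}

static int add_watch(struct mark *m, int fail_attach)
{
	if (wd_reserve(m) < 0)
		return -1;
	if (attach_to_inode(m, fail_attach)) {
		wd_remove(m); /* on the idr but not on the inode: unwind */
		return -1;
	}
	return m->wd;
}

int main(void)
{
	struct mark a = { .wd = -1 }, b = { .wd = -1 };
	printf("ok: wd=%d\n", add_watch(&a, 0));
	printf("fail: wd=%d (slot unwound: %d)\n", add_watch(&b, 1), b.wd);
	return 0;
}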
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 959b73e756f..521368574e9 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -136,18 +136,24 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
136{ 136{
137 if ((old->mask == new->mask) && 137 if ((old->mask == new->mask) &&
138 (old->to_tell == new->to_tell) && 138 (old->to_tell == new->to_tell) &&
139 (old->data_type == new->data_type)) { 139 (old->data_type == new->data_type) &&
140 (old->name_len == new->name_len)) {
140 switch (old->data_type) { 141 switch (old->data_type) {
141 case (FSNOTIFY_EVENT_INODE): 142 case (FSNOTIFY_EVENT_INODE):
142 if (old->inode == new->inode) 143 /* remember, after old was put on the wait_q we aren't
144 * allowed to look at the inode any more, only thing
145 * left to check was if the file_name is the same */
146 if (old->name_len &&
147 !strcmp(old->file_name, new->file_name))
143 return true; 148 return true;
144 break; 149 break;
145 case (FSNOTIFY_EVENT_PATH): 150 case (FSNOTIFY_EVENT_PATH):
146 if ((old->path.mnt == new->path.mnt) && 151 if ((old->path.mnt == new->path.mnt) &&
147 (old->path.dentry == new->path.dentry)) 152 (old->path.dentry == new->path.dentry))
148 return true; 153 return true;
154 break;
149 case (FSNOTIFY_EVENT_NONE): 155 case (FSNOTIFY_EVENT_NONE):
150 return true; 156 return false;
151 }; 157 };
152 } 158 }
153 return false; 159 return false;
@@ -339,18 +345,19 @@ static void initialize_event(struct fsnotify_event *event)
339 * @name the filename, if available 345 * @name the filename, if available
340 */ 346 */
341struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data, 347struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
342 int data_type, const char *name, u32 cookie) 348 int data_type, const char *name, u32 cookie,
349 gfp_t gfp)
343{ 350{
344 struct fsnotify_event *event; 351 struct fsnotify_event *event;
345 352
346 event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL); 353 event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
347 if (!event) 354 if (!event)
348 return NULL; 355 return NULL;
349 356
350 initialize_event(event); 357 initialize_event(event);
351 358
352 if (name) { 359 if (name) {
353 event->file_name = kstrdup(name, GFP_KERNEL); 360 event->file_name = kstrdup(name, gfp);
354 if (!event->file_name) { 361 if (!event->file_name) {
355 kmem_cache_free(fsnotify_event_cachep, event); 362 kmem_cache_free(fsnotify_event_cachep, event);
356 return NULL; 363 return NULL;
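Once an event has been queued its inode may no longer be dereferenced, so the merged-event comparison above falls back to name_len plus the copied file name. A sketch of that comparison; the struct layout is invented for illustration:

#include <stdbool.h>
#include <string.h>

struct event {
	unsigned mask;
	size_t name_len;
	const char *file_name; /* may be NULL when name_len == 0 */
};

/* Two queued events coalesce only if mask and name match; the inode
 * itself is deliberately never touched here. */
static bool events_equal(const struct event *old, const struct event *new)
{
	if (old->mask != new->mask || old->name_len != new->name_len)
		return false;
	if (!old->name_len)
		return false; /* nothing safe left to compare */
	return strcmp(old->file_name, new->file_name) == 0;
}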
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 9fcd36dcc9a..467b413bec2 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,7 +7,6 @@
7 7
8#include <linux/fs.h> 8#include <linux/fs.h>
9#include <linux/mount.h> 9#include <linux/mount.h>
10#include <linux/smp_lock.h>
11 10
12#define MLOG_MASK_PREFIX ML_INODE 11#define MLOG_MASK_PREFIX ML_INODE
13#include <cluster/masklog.h> 12#include <cluster/masklog.h>
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 1a9c7878f86..ea4e6cb29e1 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -436,7 +436,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
436 rcu_assign_pointer(ptbl->part[partno], p); 436 rcu_assign_pointer(ptbl->part[partno], p);
437 437
438 /* suppress uevent if the disk suppresses it */ 438
439 if (!dev_get_uevent_suppress(pdev)) 439 if (!dev_get_uevent_suppress(ddev))
440 kobject_uevent(&pdev->kobj, KOBJ_ADD); 440 kobject_uevent(&pdev->kobj, KOBJ_ADD);
441 441
442 return p; 442 return p;
diff --git a/fs/pipe.c b/fs/pipe.c
index f7dd21ad85a..52c41511483 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -68,8 +68,8 @@ void pipe_double_lock(struct pipe_inode_info *pipe1,
68 pipe_lock_nested(pipe1, I_MUTEX_PARENT); 68 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
69 pipe_lock_nested(pipe2, I_MUTEX_CHILD); 69 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
70 } else { 70 } else {
71 pipe_lock_nested(pipe2, I_MUTEX_CHILD); 71 pipe_lock_nested(pipe2, I_MUTEX_PARENT);
72 pipe_lock_nested(pipe1, I_MUTEX_PARENT); 72 pipe_lock_nested(pipe1, I_MUTEX_CHILD);
73 } 73 }
74} 74}
75 75
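The pipe fix makes both branches take the two locks on the same objects in the same order, varying only the lockdep nesting annotation. The general rule is to order acquisition by something stable such as the object address; a sketch of that rule, with pthread mutexes standing in for kernel mutexes:

#include <pthread.h>

struct pipe { pthread_mutex_t lock; };

/* Always lock the lower-addressed pipe first, so two threads locking
 * the same pair can never deadlock ABBA-style. */
static void pipe_double_lock(struct pipe *a, struct pipe *b)
{
	if (a > b) {
		struct pipe *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

static void pipe_double_unlock(struct pipe *a, struct pipe *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}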
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 607c579e5ec..38f7bd559f3 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2042,7 +2042,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2042 * changes */ 2042 * changes */
2043 invalidate_bdev(sb->s_bdev); 2043 invalidate_bdev(sb->s_bdev);
2044 } 2044 }
2045 mutex_lock(&inode->i_mutex);
2046 mutex_lock(&dqopt->dqonoff_mutex); 2045 mutex_lock(&dqopt->dqonoff_mutex);
2047 if (sb_has_quota_loaded(sb, type)) { 2046 if (sb_has_quota_loaded(sb, type)) {
2048 error = -EBUSY; 2047 error = -EBUSY;
@@ -2054,9 +2053,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2054 * possible) Also nobody should write to the file - we use 2053 * possible) Also nobody should write to the file - we use
2055 * special IO operations which ignore the immutable bit. */ 2054 * special IO operations which ignore the immutable bit. */
2056 down_write(&dqopt->dqptr_sem); 2055 down_write(&dqopt->dqptr_sem);
2056 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2057 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | 2057 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2058 S_NOQUOTA); 2058 S_NOQUOTA);
2059 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; 2059 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
2060 mutex_unlock(&inode->i_mutex);
2060 up_write(&dqopt->dqptr_sem); 2061 up_write(&dqopt->dqptr_sem);
2061 sb->dq_op->drop(inode); 2062 sb->dq_op->drop(inode);
2062 } 2063 }
@@ -2080,7 +2081,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2080 goto out_file_init; 2081 goto out_file_init;
2081 } 2082 }
2082 mutex_unlock(&dqopt->dqio_mutex); 2083 mutex_unlock(&dqopt->dqio_mutex);
2083 mutex_unlock(&inode->i_mutex);
2084 spin_lock(&dq_state_lock); 2084 spin_lock(&dq_state_lock);
2085 dqopt->flags |= dquot_state_flag(flags, type); 2085 dqopt->flags |= dquot_state_flag(flags, type);
2086 spin_unlock(&dq_state_lock); 2086 spin_unlock(&dq_state_lock);
@@ -2094,16 +2094,17 @@ out_file_init:
2094 dqopt->files[type] = NULL; 2094 dqopt->files[type] = NULL;
2095 iput(inode); 2095 iput(inode);
2096out_lock: 2096out_lock:
2097 mutex_unlock(&dqopt->dqonoff_mutex);
2098 if (oldflags != -1) { 2097 if (oldflags != -1) {
2099 down_write(&dqopt->dqptr_sem); 2098 down_write(&dqopt->dqptr_sem);
2099 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2100 /* Set the flags back (in the case of accidental quotaon() 2100 /* Set the flags back (in the case of accidental quotaon()
2101 * on a wrong file we don't want to mess up the flags) */ 2101 * on a wrong file we don't want to mess up the flags) */
2102 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); 2102 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2103 inode->i_flags |= oldflags; 2103 inode->i_flags |= oldflags;
2104 mutex_unlock(&inode->i_mutex);
2104 up_write(&dqopt->dqptr_sem); 2105 up_write(&dqopt->dqptr_sem);
2105 } 2106 }
2106 mutex_unlock(&inode->i_mutex); 2107 mutex_unlock(&dqopt->dqonoff_mutex);
2107out_fmt: 2108out_fmt:
2108 put_quota_format(fmt); 2109 put_quota_format(fmt);
2109 2110
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index ebb2c417912..11f0c06316d 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -20,6 +20,7 @@
20#include <linux/ramfs.h> 20#include <linux/ramfs.h>
21#include <linux/pagevec.h> 21#include <linux/pagevec.h>
22#include <linux/mman.h> 22#include <linux/mman.h>
23#include <linux/sched.h>
23 24
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include "internal.h" 26#include "internal.h"
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 77f5bb746bf..90622200b39 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -997,7 +997,7 @@ static int reiserfs_async_progress_wait(struct super_block *s)
997 DEFINE_WAIT(wait); 997 DEFINE_WAIT(wait);
998 struct reiserfs_journal *j = SB_JOURNAL(s); 998 struct reiserfs_journal *j = SB_JOURNAL(s);
999 if (atomic_read(&j->j_async_throttle)) 999 if (atomic_read(&j->j_async_throttle))
1000 congestion_wait(WRITE, HZ / 10); 1000 congestion_wait(BLK_RW_ASYNC, HZ / 10);
1001 return 0; 1001 return 0;
1002} 1002}
1003 1003
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d3aeb061612..7adea74d6a8 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -24,7 +24,6 @@
24#include <linux/exportfs.h> 24#include <linux/exportfs.h>
25#include <linux/quotaops.h> 25#include <linux/quotaops.h>
26#include <linux/vfs.h> 26#include <linux/vfs.h>
27#include <linux/mnt_namespace.h>
28#include <linux/mount.h> 27#include <linux/mount.h>
29#include <linux/namei.h> 28#include <linux/namei.h>
30#include <linux/crc32.h> 29#include <linux/crc32.h>
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index f3d47d85684..6925b835a43 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -46,7 +46,6 @@
46#include <linux/reiserfs_acl.h> 46#include <linux/reiserfs_acl.h>
47#include <asm/uaccess.h> 47#include <asm/uaccess.h>
48#include <net/checksum.h> 48#include <net/checksum.h>
49#include <linux/smp_lock.h>
50#include <linux/stat.h> 49#include <linux/stat.h>
51#include <linux/quotaops.h> 50#include <linux/quotaops.h>
52 51
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 3b52770f46f..cb5fc57e370 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -30,6 +30,7 @@
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/vfs.h> 31#include <linux/vfs.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/smp_lock.h>
33#include <linux/mutex.h> 34#include <linux/mutex.h>
34#include <linux/pagemap.h> 35#include <linux/pagemap.h>
35#include <linux/init.h> 36#include <linux/init.h>
diff --git a/fs/sync.c b/fs/sync.c
index dd200025af8..3422ba61d86 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -112,8 +112,13 @@ restart:
112 mutex_unlock(&mutex); 112 mutex_unlock(&mutex);
113} 113}
114 114
115/*
116 * sync everything. Start out by waking pdflush, because that writes back
117 * all queues in parallel.
118 */
115SYSCALL_DEFINE0(sync) 119SYSCALL_DEFINE0(sync)
116{ 120{
121 wakeup_pdflush(0);
117 sync_filesystems(0); 122 sync_filesystems(0);
118 sync_filesystems(1); 123 sync_filesystems(1);
119 if (unlikely(laptop_mode)) 124 if (unlikely(laptop_mode))
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 9345806c885..2524714bece 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -171,6 +171,7 @@ static ssize_t write(struct file *file, const char __user *userbuf,
171 if (count > 0) 171 if (count > 0)
172 *off = offs + count; 172 *off = offs + count;
173 173
174 kfree(temp);
174 return count; 175 return count;
175} 176}
176 177
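The sysfs fix frees the temporary copy buffer on the success path, which was previously leaked. A user-space sketch of the underlying rule (every exit path releases what the function allocated); the names are illustrative:

#include <stdlib.h>
#include <string.h>

/* Copy userbuf into a temporary, "write" it, and free the temporary
 * on every path out of the function. */
static long write_buf(const char *userbuf, size_t count)
{
	long ret;
	char *temp = malloc(count);

	if (!temp)
		return -1;

	memcpy(temp, userbuf, count); /* stands in for copy_from_user() */
	ret = (long)count;            /* stands in for the real write */

	free(temp); /* the kfree() that was missing in the original */
	return ret;
}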
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index d88d0fac9fa..14f2d71ea3c 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -939,8 +939,10 @@ again:
939 /* Remove from old parent's list and insert into new parent's list. */ 939 /* Remove from old parent's list and insert into new parent's list. */
940 sysfs_unlink_sibling(sd); 940 sysfs_unlink_sibling(sd);
941 sysfs_get(new_parent_sd); 941 sysfs_get(new_parent_sd);
942 drop_nlink(old_parent->d_inode);
942 sysfs_put(sd->s_parent); 943 sysfs_put(sd->s_parent);
943 sd->s_parent = new_parent_sd; 944 sd->s_parent = new_parent_sd;
945 inc_nlink(new_parent->d_inode);
944 sysfs_link_sibling(sd); 946 sysfs_link_sibling(sd);
945 947
946 out_unlock: 948 out_unlock:
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index bc5857199ec..762a7d6cec7 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -297,6 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
297{ 297{
298 struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer); 298 struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
299 299
300 dbg_io("jhead %d", wbuf->jhead);
300 wbuf->need_sync = 1; 301 wbuf->need_sync = 1;
301 wbuf->c->need_wbuf_sync = 1; 302 wbuf->c->need_wbuf_sync = 1;
302 ubifs_wake_up_bgt(wbuf->c); 303 ubifs_wake_up_bgt(wbuf->c);
@@ -311,8 +312,12 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
311{ 312{
312 ubifs_assert(!hrtimer_active(&wbuf->timer)); 313 ubifs_assert(!hrtimer_active(&wbuf->timer));
313 314
314 if (!ktime_to_ns(wbuf->softlimit)) 315 if (wbuf->no_timer)
315 return; 316 return;
317 dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead,
318 div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
319 div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
320 USEC_PER_SEC));
316 hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta, 321 hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
317 HRTIMER_MODE_REL); 322 HRTIMER_MODE_REL);
318} 323}
@@ -323,11 +328,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
323 */ 328 */
324static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) 329static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
325{ 330{
326 /* 331 if (wbuf->no_timer)
327 * If the syncer is waiting for the lock (from the background thread's 332 return;
328 * context) and another task is changing write-buffer then the syncing
329 * should be canceled.
330 */
331 wbuf->need_sync = 0; 333 wbuf->need_sync = 0;
332 hrtimer_cancel(&wbuf->timer); 334 hrtimer_cancel(&wbuf->timer);
333} 335}
@@ -349,8 +351,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
349 /* Write-buffer is empty or not seeked */ 351 /* Write-buffer is empty or not seeked */
350 return 0; 352 return 0;
351 353
352 dbg_io("LEB %d:%d, %d bytes", 354 dbg_io("LEB %d:%d, %d bytes, jhead %d",
353 wbuf->lnum, wbuf->offs, wbuf->used); 355 wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead);
354 ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); 356 ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
355 ubifs_assert(!(wbuf->avail & 7)); 357 ubifs_assert(!(wbuf->avail & 7));
356 ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size); 358 ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
@@ -390,7 +392,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
390 * @offs: logical eraseblock offset to seek to 392 * @offs: logical eraseblock offset to seek to
391 * @dtype: data type 393 * @dtype: data type
392 * 394 *
393 * This function targets the write buffer to logical eraseblock @lnum:@offs. 395 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
394 * The write-buffer is synchronized if it is not empty. Returns zero in case of 396 * The write-buffer is synchronized if it is not empty. Returns zero in case of
395 * success and a negative error code in case of failure. 397 * success and a negative error code in case of failure.
396 */ 398 */
@@ -399,7 +401,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
399{ 401{
400 const struct ubifs_info *c = wbuf->c; 402 const struct ubifs_info *c = wbuf->c;
401 403
402 dbg_io("LEB %d:%d", lnum, offs); 404 dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead);
403 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); 405 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
404 ubifs_assert(offs >= 0 && offs <= c->leb_size); 406 ubifs_assert(offs >= 0 && offs <= c->leb_size);
405 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); 407 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
@@ -506,9 +508,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
506 struct ubifs_info *c = wbuf->c; 508 struct ubifs_info *c = wbuf->c;
507 int err, written, n, aligned_len = ALIGN(len, 8), offs; 509 int err, written, n, aligned_len = ALIGN(len, 8), offs;
508 510
509 dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len, 511 dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len,
510 dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum, 512 dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead,
511 wbuf->offs + wbuf->used); 513 wbuf->lnum, wbuf->offs + wbuf->used);
512 ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); 514 ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
513 ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); 515 ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
514 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); 516 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
@@ -533,8 +535,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
533 memcpy(wbuf->buf + wbuf->used, buf, len); 535 memcpy(wbuf->buf + wbuf->used, buf, len);
534 536
535 if (aligned_len == wbuf->avail) { 537 if (aligned_len == wbuf->avail) {
536 dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, 538 dbg_io("flush jhead %d wbuf to LEB %d:%d",
537 wbuf->offs); 539 wbuf->jhead, wbuf->lnum, wbuf->offs);
538 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, 540 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
539 wbuf->offs, c->min_io_size, 541 wbuf->offs, c->min_io_size,
540 wbuf->dtype); 542 wbuf->dtype);
@@ -562,7 +564,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
562 * minimal I/O unit. We have to fill and flush write-buffer and switch 564 * minimal I/O unit. We have to fill and flush write-buffer and switch
563 * to the next min. I/O unit. 565 * to the next min. I/O unit.
564 */ 566 */
565 dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs); 567 dbg_io("flush jhead %d wbuf to LEB %d:%d",
568 wbuf->jhead, wbuf->lnum, wbuf->offs);
566 memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); 569 memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
567 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, 570 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
568 c->min_io_size, wbuf->dtype); 571 c->min_io_size, wbuf->dtype);
@@ -695,7 +698,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
695 int err, rlen, overlap; 698 int err, rlen, overlap;
696 struct ubifs_ch *ch = buf; 699 struct ubifs_ch *ch = buf;
697 700
698 dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); 701 dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs,
702 dbg_ntype(type), len, wbuf->jhead);
699 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 703 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
700 ubifs_assert(!(offs & 7) && offs < c->leb_size); 704 ubifs_assert(!(offs & 7) && offs < c->leb_size);
701 ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); 705 ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
@@ -819,13 +823,12 @@ out:
819 * @c: UBIFS file-system description object 823 * @c: UBIFS file-system description object
820 * @wbuf: write-buffer to initialize 824 * @wbuf: write-buffer to initialize
821 * 825 *
822 * This function initializes write buffer. Returns zero in case of success 826 * This function initializes write-buffer. Returns zero in case of success
823 * %-ENOMEM in case of failure. 827 * %-ENOMEM in case of failure.
824 */ 828 */
825int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) 829int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
826{ 830{
827 size_t size; 831 size_t size;
828 ktime_t hardlimit;
829 832
830 wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL); 833 wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL);
831 if (!wbuf->buf) 834 if (!wbuf->buf)
@@ -851,22 +854,16 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
851 854
852 hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 855 hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
853 wbuf->timer.function = wbuf_timer_callback_nolock; 856 wbuf->timer.function = wbuf_timer_callback_nolock;
854 /* 857 wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
855 * Make write-buffer soft limit to be 20% of the hard limit. The 858 wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
856 * write-buffer timer is allowed to expire any time between the soft 859 wbuf->delta *= 1000000000ULL;
857 * and hard limits. 860 ubifs_assert(wbuf->delta <= ULONG_MAX);
858 */
859 hardlimit = ktime_set(DEFAULT_WBUF_TIMEOUT_SECS, 0);
860 wbuf->delta = (DEFAULT_WBUF_TIMEOUT_SECS * NSEC_PER_SEC) * 2 / 10;
861 wbuf->softlimit = ktime_sub_ns(hardlimit, wbuf->delta);
862 hrtimer_set_expires_range_ns(&wbuf->timer, wbuf->softlimit,
863 wbuf->delta);
864 return 0; 861 return 0;
865} 862}
866 863
867/** 864/**
868 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array. 865 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
869 * @wbuf: the write-buffer whereto add 866 * @wbuf: the write-buffer where to add
870 * @inum: the inode number 867 * @inum: the inode number
871 * 868 *
872 * This function adds an inode number to the inode array of the write-buffer. 869 * This function adds an inode number to the inode array of the write-buffer.
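The new wbuf timer keeps a 3 s soft limit and lets the hrtimer fire any time up to the 5 s hard limit, with the slack stored in nanoseconds. The arithmetic in ubifs_wbuf_init() reduces to the following standalone computation, using the same constants as the patch:

#include <stdio.h>

#define WBUF_TIMEOUT_SOFTLIMIT 3 /* seconds */
#define WBUF_TIMEOUT_HARDLIMIT 5 /* seconds */

int main(void)
{
	unsigned long long softlimit_ns =
		WBUF_TIMEOUT_SOFTLIMIT * 1000000000ULL;
	unsigned long long delta_ns =
		(WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT)
		* 1000000000ULL;

	/* the timer may expire anywhere in [softlimit, softlimit + delta] */
	printf("expires between %llu and %llu ns\n",
	       softlimit_ns, softlimit_ns + delta_ns);
	return 0;
}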
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 6db7a6be6c9..8aacd64957a 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -25,7 +25,6 @@
25/* This file implements EXT2-compatible extended attribute ioctl() calls */ 25/* This file implements EXT2-compatible extended attribute ioctl() calls */
26 26
27#include <linux/compat.h> 27#include <linux/compat.h>
28#include <linux/smp_lock.h>
29#include <linux/mount.h> 28#include <linux/mount.h>
30#include "ubifs.h" 29#include "ubifs.h"
31 30
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 805605250f1..e5f6cf8a115 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -53,6 +53,25 @@ static int is_empty(void *buf, int len)
53} 53}
54 54
55/** 55/**
56 * first_non_ff - find offset of the first non-0xff byte.
57 * @buf: buffer to search in
58 * @len: length of buffer
59 *
60 * This function returns offset of the first non-0xff byte in @buf or %-1 if
61 * the buffer contains only 0xff bytes.
62 */
63static int first_non_ff(void *buf, int len)
64{
65 uint8_t *p = buf;
66 int i;
67
68 for (i = 0; i < len; i++)
69 if (*p++ != 0xff)
70 return i;
71 return -1;
72}
73
74/**
56 * get_master_node - get the last valid master node allowing for corruption. 75 * get_master_node - get the last valid master node allowing for corruption.
57 * @c: UBIFS file-system description object 76 * @c: UBIFS file-system description object
58 * @lnum: LEB number 77 * @lnum: LEB number
@@ -357,11 +376,7 @@ static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
357 empty_offs = ALIGN(offs + 1, c->min_io_size); 376 empty_offs = ALIGN(offs + 1, c->min_io_size);
358 check_len = c->leb_size - empty_offs; 377 check_len = c->leb_size - empty_offs;
359 p = buf + empty_offs - offs; 378 p = buf + empty_offs - offs;
360 379 return is_empty(p, check_len);
361 for (; check_len > 0; check_len--)
362 if (*p++ != 0xff)
363 return 0;
364 return 1;
365} 380}
366 381
367/** 382/**
@@ -543,8 +558,8 @@ static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs)
543 * 558 *
544 * This function does a scan of a LEB, but caters for errors that might have 559 * This function does a scan of a LEB, but caters for errors that might have
545 * been caused by the unclean unmount from which we are attempting to recover. 560 * been caused by the unclean unmount from which we are attempting to recover.
546 * 561 * Returns %0 in case of success, %-EUCLEAN if an unrecoverable corruption is
547 * This function returns %0 on success and a negative error code on failure. 562 * found, and a negative error code in case of failure.
548 */ 563 */
549struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, 564struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
550 int offs, void *sbuf, int grouped) 565 int offs, void *sbuf, int grouped)
@@ -643,7 +658,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
643 goto corrupted; 658 goto corrupted;
644 default: 659 default:
645 dbg_err("unknown"); 660 dbg_err("unknown");
646 goto corrupted; 661 err = -EINVAL;
662 goto error;
647 } 663 }
648 } 664 }
649 665
@@ -652,8 +668,13 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
652 clean_buf(c, &buf, lnum, &offs, &len); 668 clean_buf(c, &buf, lnum, &offs, &len);
653 need_clean = 1; 669 need_clean = 1;
654 } else { 670 } else {
655 ubifs_err("corrupt empty space at LEB %d:%d", 671 int corruption = first_non_ff(buf, len);
656 lnum, offs); 672
673 ubifs_err("corrupt empty space at LEB %d:%d, corruption "
674 "starts at %d", lnum, offs, corruption);
675 /* Make sure we dump interesting non-0xFF data */
676 offs = corruption;
677 buf += corruption;
657 goto corrupted; 678 goto corrupted;
658 } 679 }
659 } 680 }
@@ -813,7 +834,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
813static int recover_head(const struct ubifs_info *c, int lnum, int offs, 834static int recover_head(const struct ubifs_info *c, int lnum, int offs,
814 void *sbuf) 835 void *sbuf)
815{ 836{
816 int len, err, need_clean = 0; 837 int len, err;
817 838
818 if (c->min_io_size > 1) 839 if (c->min_io_size > 1)
819 len = c->min_io_size; 840 len = c->min_io_size;
@@ -827,19 +848,7 @@ static int recover_head(const struct ubifs_info *c, int lnum, int offs,
827 848
828 /* Read at the head location and check it is empty flash */ 849 /* Read at the head location and check it is empty flash */
829 err = ubi_read(c->ubi, lnum, sbuf, offs, len); 850 err = ubi_read(c->ubi, lnum, sbuf, offs, len);
830 if (err) 851 if (err || !is_empty(sbuf, len)) {
831 need_clean = 1;
832 else {
833 uint8_t *p = sbuf;
834
835 while (len--)
836 if (*p++ != 0xff) {
837 need_clean = 1;
838 break;
839 }
840 }
841
842 if (need_clean) {
843 dbg_rcvry("cleaning head at %d:%d", lnum, offs); 852 dbg_rcvry("cleaning head at %d:%d", lnum, offs);
844 if (offs == 0) 853 if (offs == 0)
845 return ubifs_leb_unmap(c, lnum); 854 return ubifs_leb_unmap(c, lnum);
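The new first_non_ff() helper is small enough to exercise standalone: erased flash reads back as 0xFF, so the first byte with any other value marks where the corruption dump should start.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Offset of the first non-0xff byte, or -1 if the buffer is all 0xff
 * (genuinely empty flash) -- the same contract as the patch. */
static int first_non_ff(const void *buf, int len)
{
	const uint8_t *p = buf;

	for (int i = 0; i < len; i++)
		if (p[i] != 0xff)
			return i;
	return -1;
}

int main(void)
{
	uint8_t leb[16];

	memset(leb, 0xff, sizeof(leb));
	printf("%d\n", first_non_ff(leb, sizeof(leb))); /* -1: empty */
	leb[5] = 0x00;
	printf("%d\n", first_non_ff(leb, sizeof(leb))); /* 5: corrupt */
	return 0;
}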
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 11cc80125a4..2970500f32d 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -837,9 +837,10 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
837 837
838 dbg_mnt("replay log LEB %d:%d", lnum, offs); 838 dbg_mnt("replay log LEB %d:%d", lnum, offs);
839 sleb = ubifs_scan(c, lnum, offs, sbuf); 839 sleb = ubifs_scan(c, lnum, offs, sbuf);
840 if (IS_ERR(sleb)) { 840 if (IS_ERR(sleb)) {
841 if (c->need_recovery) 841 if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
842 sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf); 842 return PTR_ERR(sleb);
843 sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
843 if (IS_ERR(sleb)) 844 if (IS_ERR(sleb))
844 return PTR_ERR(sleb); 845 return PTR_ERR(sleb);
845 } 846 }
@@ -957,7 +958,7 @@ out:
957 return err; 958 return err;
958 959
959out_dump: 960out_dump:
960 ubifs_err("log error detected while replying the log at LEB %d:%d", 961 ubifs_err("log error detected while replaying the log at LEB %d:%d",
961 lnum, offs + snod->offs); 962 lnum, offs + snod->offs);
962 dbg_dump_node(c, snod->node); 963 dbg_dump_node(c, snod->node);
963 ubifs_scan_destroy(sleb); 964 ubifs_scan_destroy(sleb);
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 0ed82479b44..892ebfee4fe 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -238,12 +238,12 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
238{ 238{
239 int len; 239 int len;
240 240
241 ubifs_err("corrupted data at LEB %d:%d", lnum, offs); 241 ubifs_err("corruption at LEB %d:%d", lnum, offs);
242 if (dbg_failure_mode) 242 if (dbg_failure_mode)
243 return; 243 return;
244 len = c->leb_size - offs; 244 len = c->leb_size - offs;
245 if (len > 4096) 245 if (len > 8192)
246 len = 4096; 246 len = 8192;
247 dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs); 247 dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs);
248 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1); 248 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1);
249} 249}
@@ -256,7 +256,9 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
256 * @sbuf: scan buffer (must be c->leb_size) 256 * @sbuf: scan buffer (must be c->leb_size)
257 * 257 *
258 * This function scans LEB number @lnum and returns complete information about 258 * This function scans LEB number @lnum and returns complete information about
258 * its contents. Returns an error code in case of failure. 259 * its contents. Returns the scanned information in case of success,
260 * %-EUCLEAN if the LEB needs recovery, and other negative error codes in case
261 * of failure.
260 */ 262 */
261struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, 263struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
262 int offs, void *sbuf) 264 int offs, void *sbuf)
@@ -279,7 +281,6 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
279 cond_resched(); 281 cond_resched();
280 282
281 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0); 283 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
282
283 if (ret > 0) { 284 if (ret > 0) {
284 /* Padding bytes or a valid padding node */ 285 /* Padding bytes or a valid padding node */
285 offs += ret; 286 offs += ret;
@@ -304,7 +305,8 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
304 goto corrupted; 305 goto corrupted;
305 default: 306 default:
306 dbg_err("unknown"); 307 dbg_err("unknown");
307 goto corrupted; 308 err = -EINVAL;
309 goto error;
308 } 310 }
309 311
310 err = ubifs_add_snod(c, sleb, buf, offs); 312 err = ubifs_add_snod(c, sleb, buf, offs);
@@ -317,8 +319,10 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
317 len -= node_len; 319 len -= node_len;
318 } 320 }
319 321
320 if (offs % c->min_io_size) 322 if (offs % c->min_io_size) {
321 goto corrupted; 323 ubifs_err("empty space starts at non-aligned offset %d", offs);
324 goto corrupted;
325 }
322 326
323 ubifs_end_scan(c, sleb, lnum, offs); 327 ubifs_end_scan(c, sleb, lnum, offs);
324 328
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 79fad43f3c5..26d2e0d8046 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -797,7 +797,7 @@ static int alloc_wbufs(struct ubifs_info *c)
797 * does not need to be synchronized by timer. 797 * does not need to be synchronized by timer.
798 */ 798 */
799 c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; 799 c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
800 c->jheads[GCHD].wbuf.softlimit = ktime_set(0, 0); 800 c->jheads[GCHD].wbuf.no_timer = 1;
801 801
802 return 0; 802 return 0;
803} 803}
@@ -986,7 +986,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
986 switch (token) { 986 switch (token) {
987 /* 987 /*
988 * %Opt_fast_unmount and %Opt_norm_unmount options are ignored. 988 * %Opt_fast_unmount and %Opt_norm_unmount options are ignored.
989 * We accepte them in order to be backware-compatible. But this 989 * We accept them in order to be backward-compatible. But this
990 * should be removed at some point. 990 * should be removed at some point.
991 */ 991 */
992 case Opt_fast_unmount: 992 case Opt_fast_unmount:
@@ -1287,6 +1287,9 @@ static int mount_ubifs(struct ubifs_info *c)
1287 if (err) 1287 if (err)
1288 goto out_journal; 1288 goto out_journal;
1289 1289
1290 /* Calculate 'min_idx_lebs' after journal replay */
1291 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);
1292
1290 err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only); 1293 err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
1291 if (err) 1294 if (err)
1292 goto out_orphans; 1295 goto out_orphans;
@@ -1754,10 +1757,8 @@ static void ubifs_put_super(struct super_block *sb)
1754 1757
1755 /* Synchronize write-buffers */ 1758 /* Synchronize write-buffers */
1756 if (c->jheads) 1759 if (c->jheads)
1757 for (i = 0; i < c->jhead_cnt; i++) { 1760 for (i = 0; i < c->jhead_cnt; i++)
1758 ubifs_wbuf_sync(&c->jheads[i].wbuf); 1761 ubifs_wbuf_sync(&c->jheads[i].wbuf);
1759 hrtimer_cancel(&c->jheads[i].wbuf.timer);
1760 }
1761 1762
1762 /* 1763 /*
1763 * On fatal errors c->ro_media is set to 1, in which case we do 1764 * On fatal errors c->ro_media is set to 1, in which case we do
@@ -1975,7 +1976,8 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
1975 err = bdi_init(&c->bdi); 1976 err = bdi_init(&c->bdi);
1976 if (err) 1977 if (err)
1977 goto out_close; 1978 goto out_close;
1978 err = bdi_register(&c->bdi, NULL, "ubifs"); 1979 err = bdi_register(&c->bdi, NULL, "ubifs_%d_%d",
1980 c->vi.ubi_num, c->vi.vol_id);
1979 if (err) 1981 if (err)
1980 goto out_bdi; 1982 goto out_bdi;
1981 1983
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 1bf01d82006..a2934909442 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -95,8 +95,9 @@
95 */ 95 */
96#define BGT_NAME_PATTERN "ubifs_bgt%d_%d" 96#define BGT_NAME_PATTERN "ubifs_bgt%d_%d"
97 97
98/* Default write-buffer synchronization timeout in seconds */ 98/* Write-buffer synchronization timeout interval in seconds */
99#define DEFAULT_WBUF_TIMEOUT_SECS 5 99#define WBUF_TIMEOUT_SOFTLIMIT 3
100#define WBUF_TIMEOUT_HARDLIMIT 5
100 101
101/* Maximum possible inode number (only 32-bit inodes are supported now) */ 102/* Maximum possible inode number (only 32-bit inodes are supported now) */
102#define MAX_INUM 0xFFFFFFFF 103#define MAX_INUM 0xFFFFFFFF
@@ -654,7 +655,8 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
654 * @delta: hard and soft timeouts delta (the timer expire interval is @softlimit 655
655 * and @softlimit + @delta) 656 * and @softlimit + @delta)
656 * @timer: write-buffer timer 657 * @timer: write-buffer timer
657 * @need_sync: it is set if its timer expired and needs sync 658 * @no_timer: non-zero if this write-buffer does not have a timer
659 * @need_sync: non-zero if the timer expired and the wbuf needs sync'ing
658 * @next_ino: points to the next position of the following inode number 660 * @next_ino: points to the next position of the following inode number
659 * @inodes: stores the inode numbers of the nodes which are in wbuf 661 * @inodes: stores the inode numbers of the nodes which are in wbuf
660 * 662 *
@@ -683,7 +685,8 @@ struct ubifs_wbuf {
683 ktime_t softlimit; 685 ktime_t softlimit;
684 unsigned long long delta; 686 unsigned long long delta;
685 struct hrtimer timer; 687 struct hrtimer timer;
686 int need_sync; 688 unsigned int no_timer:1;
689 unsigned int need_sync:1;
687 int next_ino; 690 int next_ino;
688 ino_t *inodes; 691 ino_t *inodes;
689}; 692};
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 6832135159b..9d1b8c2e6c4 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1087,11 +1087,23 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1087 struct udf_inode_info *vati; 1087 struct udf_inode_info *vati;
1088 uint32_t pos; 1088 uint32_t pos;
1089 struct virtualAllocationTable20 *vat20; 1089 struct virtualAllocationTable20 *vat20;
1090 sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
1090 1091
1091 /* VAT file entry is in the last recorded block */ 1092 /* VAT file entry is in the last recorded block */
1092 ino.partitionReferenceNum = type1_index; 1093 ino.partitionReferenceNum = type1_index;
1093 ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root; 1094 ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
1094 sbi->s_vat_inode = udf_iget(sb, &ino); 1095 sbi->s_vat_inode = udf_iget(sb, &ino);
1096 if (!sbi->s_vat_inode &&
1097 sbi->s_last_block != blocks - 1) {
1098 printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
1099 " last recorded block (%lu), retrying with the last "
1100 "block of the device (%lu).\n",
1101 (unsigned long)sbi->s_last_block,
1102 (unsigned long)blocks - 1);
1103 ino.partitionReferenceNum = type1_index;
1104 ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
1105 sbi->s_vat_inode = udf_iget(sb, &ino);
1106 }
1095 if (!sbi->s_vat_inode) 1107 if (!sbi->s_vat_inode)
1096 return 1; 1108 return 1;
1097 1109
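The UDF change retries the VAT lookup at the last block of the device when the last *recorded* block yields nothing, as happens with media that were not fully closed. The shape of the fallback is simply "try the preferred location, then one alternative"; a generic sketch under invented names:

#include <stdio.h>

/* Illustrative stand-in: returns nonzero on success. */
static int read_vat_at(unsigned long block)
{
	return block == 999; /* pretend only block 999 holds the VAT */
}

static int load_vat(unsigned long last_recorded, unsigned long last_block)
{
	if (read_vat_at(last_recorded))
		return 0;
	if (last_recorded != last_block) {
		fprintf(stderr, "VAT not at block %lu, retrying %lu\n",
			last_recorded, last_block);
		if (read_vat_at(last_block))
			return 0;
	}
	return -1; /* mirrors the failure return in udf_load_vat() */
}

int main(void)
{
	return load_vat(990, 999) ? 1 : 0;
}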
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 1cd3b55ee3d..2d3f90afe5f 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -53,7 +53,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
53 printk(KERN_ERR "XFS: possible memory allocation " 53 printk(KERN_ERR "XFS: possible memory allocation "
54 "deadlock in %s (mode:0x%x)\n", 54 "deadlock in %s (mode:0x%x)\n",
55 __func__, lflags); 55 __func__, lflags);
56 congestion_wait(WRITE, HZ/50); 56 congestion_wait(BLK_RW_ASYNC, HZ/50);
57 } while (1); 57 } while (1);
58} 58}
59 59
@@ -130,7 +130,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
130 printk(KERN_ERR "XFS: possible memory allocation " 130 printk(KERN_ERR "XFS: possible memory allocation "
131 "deadlock in %s (mode:0x%x)\n", 131 "deadlock in %s (mode:0x%x)\n",
132 __func__, lflags); 132 __func__, lflags);
133 congestion_wait(WRITE, HZ/50); 133 congestion_wait(BLK_RW_ASYNC, HZ/50);
134 } while (1); 134 } while (1);
135} 135}
136 136
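These XFS hunks only swap the queue argument of congestion_wait(); the surrounding pattern is a bounded backoff between allocation retries. A user-space sketch of that loop, with nanosleep() standing in for congestion_wait():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Retry an allocation indefinitely, backing off ~20 ms per attempt --
 * the user-space shape of the kmem_alloc() loop above (HZ/50). */
static void *alloc_retry(size_t size)
{
	const struct timespec backoff = { 0, 20 * 1000 * 1000 };
	void *p;

	while (!(p = malloc(size))) {
		fprintf(stderr, "possible allocation deadlock, retrying\n");
		nanosleep(&backoff, NULL); /* stands in for congestion_wait() */
	}
	return p;
}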
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 7ec89fc05b2..aecf2519db7 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1268,6 +1268,14 @@ xfs_vm_writepage(
1268 if (!page_has_buffers(page)) 1268 if (!page_has_buffers(page))
1269 create_empty_buffers(page, 1 << inode->i_blkbits, 0); 1269 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1270 1270
1271
1272 /*
1273 * VM calculation for nr_to_write seems off. Bump it way
1274 * up, this gets simple streaming writes zippy again.
1275 * To be reviewed again after Jens' writeback changes.
1276 */
1277 wbc->nr_to_write *= 4;
1278
1271 /* 1279 /*
1272 * Convert delayed allocate, unwritten or unmapped space 1280 * Convert delayed allocate, unwritten or unmapped space
1273 * to real space and flush out to disk. 1281 * to real space and flush out to disk.
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 1418b916fc2..0c93c7ef3d1 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -412,7 +412,7 @@ _xfs_buf_lookup_pages(
412 412
413 XFS_STATS_INC(xb_page_retries); 413 XFS_STATS_INC(xb_page_retries);
414 xfsbufd_wakeup(0, gfp_mask); 414 xfsbufd_wakeup(0, gfp_mask);
415 congestion_wait(WRITE, HZ/50); 415 congestion_wait(BLK_RW_ASYNC, HZ/50);
416 goto retry; 416 goto retry;
417 } 417 }
418 418
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f4e25544157..0542fd50764 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -41,7 +41,6 @@
41#include "xfs_ioctl.h" 41#include "xfs_ioctl.h"
42 42
43#include <linux/dcache.h> 43#include <linux/dcache.h>
44#include <linux/smp_lock.h>
45 44
46static struct vm_operations_struct xfs_file_vm_ops; 45static struct vm_operations_struct xfs_file_vm_ops;
47 46
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 58973bb4603..8070b34cc28 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -680,8 +680,8 @@ xfs_vn_fiemap(
680 else 680 else
681 bm.bmv_length = BTOBB(length); 681 bm.bmv_length = BTOBB(length);
682 682
683 /* our formatter will tell xfs_getbmap when to stop. */ 683 /* We add one because in getbmap world count includes the header */
684 bm.bmv_count = MAXEXTNUM; 684 bm.bmv_count = fieinfo->fi_extents_max + 1;
685 bm.bmv_iflags = BMV_IF_PREALLOC; 685 bm.bmv_iflags = BMV_IF_PREALLOC;
686 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) 686 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
687 bm.bmv_iflags |= BMV_IF_ATTRFORK; 687 bm.bmv_iflags |= BMV_IF_ATTRFORK;
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 5fcec6f020a..34ec86923f7 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -64,6 +64,10 @@ xfs_inode_alloc(
64 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); 64 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
65 if (!ip) 65 if (!ip)
66 return NULL; 66 return NULL;
67 if (inode_init_always(mp->m_super, VFS_I(ip))) {
68 kmem_zone_free(xfs_inode_zone, ip);
69 return NULL;
70 }
67 71
68 ASSERT(atomic_read(&ip->i_iocount) == 0); 72 ASSERT(atomic_read(&ip->i_iocount) == 0);
69 ASSERT(atomic_read(&ip->i_pincount) == 0); 73 ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -105,17 +109,6 @@ xfs_inode_alloc(
105#ifdef XFS_DIR2_TRACE 109#ifdef XFS_DIR2_TRACE
106 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); 110 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
107#endif 111#endif
108 /*
109 * Now initialise the VFS inode. We do this after the xfs_inode
110 * initialisation as internal failures will result in ->destroy_inode
111 * being called and that will pass down through the reclaim path and
112 * free the XFS inode. This path requires the XFS inode to already be
113 * initialised. Hence if this call fails, the xfs_inode has already
114 * been freed and we should not reference it at all in the error
115 * handling.
116 */
117 if (!inode_init_always(mp->m_super, VFS_I(ip)))
118 return NULL;
119 112
120 /* prevent anyone from using this yet */ 113 /* prevent anyone from using this yet */
121 VFS_I(ip)->i_state = I_NEW|I_LOCK; 114 VFS_I(ip)->i_state = I_NEW|I_LOCK;
@@ -123,6 +116,71 @@ xfs_inode_alloc(
123 return ip; 116 return ip;
124} 117}
125 118
119STATIC void
120xfs_inode_free(
121 struct xfs_inode *ip)
122{
123 switch (ip->i_d.di_mode & S_IFMT) {
124 case S_IFREG:
125 case S_IFDIR:
126 case S_IFLNK:
127 xfs_idestroy_fork(ip, XFS_DATA_FORK);
128 break;
129 }
130
131 if (ip->i_afp)
132 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
133
134#ifdef XFS_INODE_TRACE
135 ktrace_free(ip->i_trace);
136#endif
137#ifdef XFS_BMAP_TRACE
138 ktrace_free(ip->i_xtrace);
139#endif
140#ifdef XFS_BTREE_TRACE
141 ktrace_free(ip->i_btrace);
142#endif
143#ifdef XFS_RW_TRACE
144 ktrace_free(ip->i_rwtrace);
145#endif
146#ifdef XFS_ILOCK_TRACE
147 ktrace_free(ip->i_lock_trace);
148#endif
149#ifdef XFS_DIR2_TRACE
150 ktrace_free(ip->i_dir_trace);
151#endif
152
153 if (ip->i_itemp) {
154 /*
155 * Only if we are shutting down the fs will we see an
156 * inode still in the AIL. If it is there, we should remove
157 * it to prevent a use-after-free from occurring.
158 */
159 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
160 struct xfs_ail *ailp = lip->li_ailp;
161
162 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
163 XFS_FORCED_SHUTDOWN(ip->i_mount));
164 if (lip->li_flags & XFS_LI_IN_AIL) {
165 spin_lock(&ailp->xa_lock);
166 if (lip->li_flags & XFS_LI_IN_AIL)
167 xfs_trans_ail_delete(ailp, lip);
168 else
169 spin_unlock(&ailp->xa_lock);
170 }
171 xfs_inode_item_destroy(ip);
172 ip->i_itemp = NULL;
173 }
174
175 /* asserts to verify all state is correct here */
176 ASSERT(atomic_read(&ip->i_iocount) == 0);
177 ASSERT(atomic_read(&ip->i_pincount) == 0);
178 ASSERT(!spin_is_locked(&ip->i_flags_lock));
179 ASSERT(completion_done(&ip->i_flush));
180
181 kmem_zone_free(xfs_inode_zone, ip);
182}
183
126/* 184/*
127 * Check the validity of the inode we just found in the cache 185
128 */ 186 */
@@ -167,7 +225,7 @@ xfs_iget_cache_hit(
167 * errors cleanly, then tag it so it can be set up correctly 225 * errors cleanly, then tag it so it can be set up correctly
168 * later. 226 * later.
169 */ 227 */
170 if (!inode_init_always(mp->m_super, VFS_I(ip))) { 228 if (inode_init_always(mp->m_super, VFS_I(ip))) {
171 error = ENOMEM; 229 error = ENOMEM;
172 goto out_error; 230 goto out_error;
173 } 231 }
@@ -299,7 +357,8 @@ out_preload_end:
299 if (lock_flags) 357 if (lock_flags)
300 xfs_iunlock(ip, lock_flags); 358 xfs_iunlock(ip, lock_flags);
301out_destroy: 359out_destroy:
302 xfs_destroy_inode(ip); 360 __destroy_inode(VFS_I(ip));
361 xfs_inode_free(ip);
303 return error; 362 return error;
304} 363}
305 364
@@ -504,62 +563,7 @@ xfs_ireclaim(
504 xfs_qm_dqdetach(ip); 563 xfs_qm_dqdetach(ip);
505 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 564 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
506 565
507 switch (ip->i_d.di_mode & S_IFMT) { 566 xfs_inode_free(ip);
508 case S_IFREG:
509 case S_IFDIR:
510 case S_IFLNK:
511 xfs_idestroy_fork(ip, XFS_DATA_FORK);
512 break;
513 }
514
515 if (ip->i_afp)
516 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
517
518#ifdef XFS_INODE_TRACE
519 ktrace_free(ip->i_trace);
520#endif
521#ifdef XFS_BMAP_TRACE
522 ktrace_free(ip->i_xtrace);
523#endif
524#ifdef XFS_BTREE_TRACE
525 ktrace_free(ip->i_btrace);
526#endif
527#ifdef XFS_RW_TRACE
528 ktrace_free(ip->i_rwtrace);
529#endif
530#ifdef XFS_ILOCK_TRACE
531 ktrace_free(ip->i_lock_trace);
532#endif
533#ifdef XFS_DIR2_TRACE
534 ktrace_free(ip->i_dir_trace);
535#endif
536 if (ip->i_itemp) {
537 /*
538 * Only if we are shutting down the fs will we see an
539 * inode still in the AIL. If it is there, we should remove
540 * it to prevent a use-after-free from occurring.
541 */
542 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
543 struct xfs_ail *ailp = lip->li_ailp;
544
545 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
546 XFS_FORCED_SHUTDOWN(ip->i_mount));
547 if (lip->li_flags & XFS_LI_IN_AIL) {
548 spin_lock(&ailp->xa_lock);
549 if (lip->li_flags & XFS_LI_IN_AIL)
550 xfs_trans_ail_delete(ailp, lip);
551 else
552 spin_unlock(&ailp->xa_lock);
553 }
554 xfs_inode_item_destroy(ip);
555 ip->i_itemp = NULL;
556 }
557 /* asserts to verify all state is correct here */
558 ASSERT(atomic_read(&ip->i_iocount) == 0);
559 ASSERT(atomic_read(&ip->i_pincount) == 0);
560 ASSERT(!spin_is_locked(&ip->i_flags_lock));
561 ASSERT(completion_done(&ip->i_flush));
562 kmem_zone_free(xfs_inode_zone, ip);
563} 567}
564 568
565/* 569/*
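The xfs_iget.c rework extracts the teardown that was duplicated between the allocation error path and xfs_ireclaim() into a single xfs_inode_free() helper. The key idea is one shared cleanup routine that both paths call, so they can never drift apart; a minimal sketch with illustrative (non-XFS) fields:

#include <stdlib.h>

struct resource {
	void *fork_data; /* illustrative fields, not XFS's */
	void *log_item;
};

/* One teardown used by both the constructor's error path and the
 * normal reclaim path. */
static void resource_free(struct resource *r)
{
	free(r->fork_data);
	free(r->log_item);
	free(r);
}

static struct resource *resource_alloc(void)
{
	struct resource *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->fork_data = malloc(64);
	if (!r->fork_data) {
		resource_free(r); /* same helper the reclaim path uses */
		return NULL;
	}
	return r;
}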
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1804f866a71..65f24a3cc99 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -310,23 +310,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
310} 310}
311 311
312/* 312/*
313 * Get rid of a partially initialized inode.
314 *
315 * We have to go through destroy_inode to make sure allocations
316 * from init_inode_always like the security data are undone.
317 *
318 * We mark the inode bad so that it takes the short cut in
319 * the reclaim path instead of going through the flush path
320 * which doesn't make sense for an inode that has never seen the
321 * light of day.
322 */
323static inline void xfs_destroy_inode(struct xfs_inode *ip)
324{
325 make_bad_inode(VFS_I(ip));
326 return destroy_inode(VFS_I(ip));
327}
328
329/*
330 * i_flags helper functions 313 * i_flags helper functions
331 */ 314 */
332static inline void 315static inline void