Diffstat (limited to 'fs')
 -rw-r--r--  fs/bio.c                      |  23
 -rw-r--r--  fs/cifs/TODO                  |   2
 -rw-r--r--  fs/cifs/cifs_fs_sb.h          |   6
 -rw-r--r--  fs/cifs/cifsfs.c              |   5
 -rw-r--r--  fs/cifs/cifsglob.h            |   3
 -rw-r--r--  fs/cifs/cifsproto.h           |   1
 -rw-r--r--  fs/cifs/connect.c             | 195
 -rw-r--r--  fs/cifs/file.c                |  72
 -rw-r--r--  fs/cifs/inode.c               |   1
 -rw-r--r--  fs/cifs/ioctl.c               |  16
 -rw-r--r--  fs/cifs/misc.c                |  25
 -rw-r--r--  fs/ext4/ext4.h                |   4
 -rw-r--r--  fs/ext4/inode.c               |   5
 -rw-r--r--  fs/ext4/mballoc.c             |   2
 -rw-r--r--  fs/ext4/page-io.c             |  97
 -rw-r--r--  fs/ext4/super.c               | 102
 -rw-r--r--  fs/hugetlbfs/inode.c          |   3
 -rw-r--r--  fs/ioprio.c                   |  18
 -rw-r--r--  fs/locks.c                    |  19
 -rw-r--r--  fs/logfs/logfs.h              |   2
 -rw-r--r--  fs/nfsd/nfs4state.c           |  16
 -rw-r--r--  fs/ocfs2/ocfs2.h              |   6
 -rw-r--r--  fs/openpromfs/inode.c         |   2
 -rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c   |   7
 -rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c    |   2
 -rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c  |   2
 -rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c   |   3
 -rw-r--r--  fs/xfs/linux-2.6/xfs_super.c  |   3
 -rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c   |   1
 -rw-r--r--  fs/xfs/xfs_filestream.c       |   8
 -rw-r--r--  fs/xfs/xfs_mount.c            |   1
 -rw-r--r--  fs/xfs/xfs_quota.h            |  20
 32 files changed, 374 insertions(+), 298 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
@@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 {
 	struct bio *bio;
 
+	if (nr_iovecs > UIO_MAXIOV)
+		return NULL;
+
 	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
 		      gfp_mask);
 	if (unlikely(!bio))
@@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd)
 static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 					       gfp_t gfp_mask)
 {
-	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
+	struct bio_map_data *bmd;
 
+	if (iov_count > UIO_MAXIOV)
+		return NULL;
+
+	bmd = kmalloc(sizeof(*bmd), gfp_mask);
 	if (!bmd)
 		return NULL;
 
@@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		start = uaddr >> PAGE_SHIFT;
 
+		/*
+		 * Overflow, abort
+		 */
+		if (end < start)
+			return ERR_PTR(-EINVAL);
+
 		nr_pages += end - start;
 		len += iov[i].iov_len;
 	}
@@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long start = uaddr >> PAGE_SHIFT;
 
+		/*
+		 * Overflow, abort
+		 */
+		if (end < start)
+			return ERR_PTR(-EINVAL);
+
 		nr_pages += end - start;
 		/*
 		 * buffer must be aligned to at least hardsector size for now
@@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 		unsigned long start = uaddr >> PAGE_SHIFT;
 		const int local_nr_pages = end - start;
 		const int page_limit = cur_page + local_nr_pages;
-		
+
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 					  write_to_vm, &pages[cur_page]);
 		if (ret < local_nr_pages) {
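Illustrative aside (not part of the patch series): the fs/bio.c hunks above guard against a user-supplied iovec that either exceeds UIO_MAXIOV or wraps the address space once the buffer is rounded up to page boundaries, which is what the "end < start" test catches. A minimal userspace C sketch of that wraparound check; PAGE_SHIFT is assumed to be 12 here purely for the example.

/* Sketch only: shows why end < start detects uaddr + len overflow. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Returns the number of pages spanned, or 0 if the range wraps around. */
static unsigned long span_pages(uint64_t uaddr, uint64_t len)
{
	uint64_t end   = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	uint64_t start = uaddr >> PAGE_SHIFT;

	if (end < start)	/* uaddr + len overflowed: reject, as the patch does */
		return 0;
	return end - start;
}

int main(void)
{
	/* normal case: 3 pages */
	printf("%lu\n", (unsigned long)span_pages(0x1000, 0x2345));
	/* range wraps the address space: rejected */
	printf("%lu\n", (unsigned long)span_pages(UINT64_MAX - 10, 0x100));
	return 0;
}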
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 5aff46c61e52..355abcdcda98 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -81,7 +81,7 @@ u) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for
 
 v) mount check for unmatched uids
 
-w) Add support for new vfs entry points for setlease and fallocate
+w) Add support for new vfs entry point for fallocate
 
 x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of
 processes can proceed better in parallel (on the server)
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 525ba59a4105..e9a393c9c2ca 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -15,7 +15,7 @@
  * the GNU Lesser General Public License for more details.
  *
  */
-#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 
 #ifndef _CIFS_FS_SB_H
 #define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
 #define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
 
 struct cifs_sb_info {
-	struct radix_tree_root tlink_tree;
-#define CIFS_TLINK_MASTER_TAG	0	/* is "master" (mount) tcon */
+	struct rb_root tlink_tree;
 	spinlock_t tlink_tree_lock;
+	struct tcon_link *master_tlink;
 	struct nls_table *local_nls;
 	unsigned int rsize;
 	unsigned int wsize;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 75c4eaa79588..9c3789762ab7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -116,7 +116,7 @@ cifs_read_super(struct super_block *sb, void *data,
 		return -ENOMEM;
 
 	spin_lock_init(&cifs_sb->tlink_tree_lock);
-	INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
+	cifs_sb->tlink_tree = RB_ROOT;
 
 	rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
 	if (rc) {
@@ -321,8 +321,7 @@ cifs_alloc_inode(struct super_block *sb)
 	/* Until the file is open and we have gotten oplock
 	info back from the server, can not assume caching of
 	file data or metadata */
-	cifs_inode->clientCanCacheRead = false;
-	cifs_inode->clientCanCacheAll = false;
+	cifs_set_oplock_level(cifs_inode, 0);
 	cifs_inode->delete_pending = false;
 	cifs_inode->invalid_mapping = false;
 	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f259e4d7612d..b577bf0a1bb3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -336,7 +336,8 @@ struct cifsTconInfo {
  * "get" on the container.
  */
 struct tcon_link {
-	unsigned long		tl_index;
+	struct rb_node		tl_rbnode;
+	uid_t			tl_uid;
 	unsigned long		tl_flags;
 #define TCON_LINK_MASTER	0
 #define TCON_LINK_PENDING	1
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index edb6d90efdf2..7ed69b6b5fe6 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,6 +104,7 @@ extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
 				      int offset);
+extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
 
 extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
 				struct file *file, struct tcon_link *tlink,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9eb327defa1d..251a17c03545 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -116,6 +116,7 @@ struct smb_vol {
 
 static int ipv4_connect(struct TCP_Server_Info *server);
 static int ipv6_connect(struct TCP_Server_Info *server);
+static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
 
 /*
@@ -2900,24 +2901,16 @@ remote_path_check:
 		goto mount_fail_check;
 	}
 
-	tlink->tl_index = pSesInfo->linux_uid;
+	tlink->tl_uid = pSesInfo->linux_uid;
 	tlink->tl_tcon = tcon;
 	tlink->tl_time = jiffies;
 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
 
-	rc = radix_tree_preload(GFP_KERNEL);
-	if (rc == -ENOMEM) {
-		kfree(tlink);
-		goto mount_fail_check;
-	}
-
+	cifs_sb->master_tlink = tlink;
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
-	radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
-			   CIFS_TLINK_MASTER_TAG);
+	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
-	radix_tree_preload_end();
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
@@ -3107,32 +3100,25 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
 int
 cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
 {
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node;
+	struct tcon_link *tlink;
 	char *tmp;
-	struct tcon_link *tlink[8];
-	unsigned long index = 0;
 
 	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
-		spin_unlock(&cifs_sb->tlink_tree_lock);
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	while ((node = rb_first(root))) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(node, root);
 
-		for (i = 0; i < ret; i++)
-			cifs_put_tlink(tlink[i]);
-	} while (ret != 0);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	tmp = cifs_sb->prepath;
 	cifs_sb->prepathlen = 0;
@@ -3271,22 +3257,10 @@ out:
 	return tcon;
 }
 
-static struct tcon_link *
+static inline struct tcon_link *
 cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
 {
-	struct tcon_link *tlink;
-	unsigned int ret;
-
-	spin_lock(&cifs_sb->tlink_tree_lock);
-	ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
-					0, 1, CIFS_TLINK_MASTER_TAG);
-	spin_unlock(&cifs_sb->tlink_tree_lock);
-
-	/* the master tcon should always be present */
-	if (ret == 0)
-		BUG();
-
-	return tlink;
+	return cifs_sb->master_tlink;
 }
 
 struct cifsTconInfo *
@@ -3302,6 +3276,47 @@ cifs_sb_tcon_pending_wait(void *unused)
 	return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
+/* find and return a tlink with given uid */
+static struct tcon_link *
+tlink_rb_search(struct rb_root *root, uid_t uid)
+{
+	struct rb_node *node = root->rb_node;
+	struct tcon_link *tlink;
+
+	while (node) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+
+		if (tlink->tl_uid > uid)
+			node = node->rb_left;
+		else if (tlink->tl_uid < uid)
+			node = node->rb_right;
+		else
+			return tlink;
+	}
+	return NULL;
+}
+
+/* insert a tcon_link into the tree */
+static void
+tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+	struct tcon_link *tlink;
+
+	while (*new) {
+		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
+		parent = *new;
+
+		if (tlink->tl_uid > new_tlink->tl_uid)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_tlink->tl_rbnode, parent, new);
+	rb_insert_color(&new_tlink->tl_rbnode, root);
+}
+
 /*
  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
  * current task.
@@ -3309,7 +3324,7 @@ cifs_sb_tcon_pending_wait(void *unused)
  * If the superblock doesn't refer to a multiuser mount, then just return
  * the master tcon for the mount.
  *
- * First, search the radix tree for an existing tcon for this fsuid. If one
+ * First, search the rbtree for an existing tcon for this fsuid. If one
  * exists, then check to see if it's pending construction. If it is then wait
  * for construction to complete. Once it's no longer pending, check to see if
  * it failed and either return an error or retry construction, depending on
@@ -3322,14 +3337,14 @@ struct tcon_link *
 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 {
 	int ret;
-	unsigned long fsuid = (unsigned long) current_fsuid();
+	uid_t fsuid = current_fsuid();
 	struct tcon_link *tlink, *newtlink;
 
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
 
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 	if (tlink)
 		cifs_get_tlink(tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3338,36 +3353,24 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
 		if (newtlink == NULL)
 			return ERR_PTR(-ENOMEM);
-		newtlink->tl_index = fsuid;
+		newtlink->tl_uid = fsuid;
 		newtlink->tl_tcon = ERR_PTR(-EACCES);
 		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
 		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
 		cifs_get_tlink(newtlink);
 
-		ret = radix_tree_preload(GFP_KERNEL);
-		if (ret != 0) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
-
 		spin_lock(&cifs_sb->tlink_tree_lock);
 		/* was one inserted after previous search? */
-		tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 		if (tlink) {
 			cifs_get_tlink(tlink);
 			spin_unlock(&cifs_sb->tlink_tree_lock);
-			radix_tree_preload_end();
 			kfree(newtlink);
 			goto wait_for_construction;
 		}
-		ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
-		spin_unlock(&cifs_sb->tlink_tree_lock);
-		radix_tree_preload_end();
-		if (ret) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
 		tlink = newtlink;
+		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
 	} else {
 wait_for_construction:
 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3413,39 +3416,39 @@ cifs_prune_tlinks(struct work_struct *work)
 {
 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
 						    prune_tlinks.work);
-	struct tcon_link *tlink[8];
-	unsigned long now = jiffies;
-	unsigned long index = 0;
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node = rb_first(root);
+	struct rb_node *tmp;
+	struct tcon_link *tlink;
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
-			    atomic_read(&tlink[i]->tl_count) != 0 ||
-			    time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
-				       now)) {
-				tlink[i] = NULL;
-				continue;
-			}
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
-		spin_unlock(&cifs_sb->tlink_tree_lock);
+	/*
+	 * Because we drop the spinlock in the loop in order to put the tlink
+	 * it's not guarded against removal of links from the tree. The only
+	 * places that remove entries from the tree are this function and
+	 * umounts. Because this function is non-reentrant and is canceled
+	 * before umount can proceed, this is safe.
+	 */
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	node = rb_first(root);
+	while (node != NULL) {
+		tmp = node;
+		node = rb_next(tmp);
+		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
+
+		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
+		    atomic_read(&tlink->tl_count) != 0 ||
+		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
+			continue;
 
-		for (i = 0; i < ret; i++) {
-			if (tlink[i] != NULL)
-				cifs_put_tlink(tlink[i]);
-		}
-	} while (ret != 0);
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(tmp, root);
+
+		spin_unlock(&cifs_sb->tlink_tree_lock);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
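Illustrative aside (not part of the patch series): cifs_sb_tlink() above follows a common lock-friendly creation pattern — search under the spinlock, drop the lock to allocate, then re-search before inserting in case another task raced in first. Below is that pattern reduced to userspace C with a pthread mutex and a plain linked list standing in for the uid-keyed rbtree; every name here is invented for the example.

/* Sketch only: search, allocate outside the lock, re-check, then insert. */
#include <pthread.h>
#include <stdlib.h>

struct entry {
	unsigned int key;
	struct entry *next;
};

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *find_locked(unsigned int key)
{
	struct entry *e;

	for (e = head; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

/* Look up the entry for key, creating one if it does not exist yet. */
struct entry *get_entry(unsigned int key)
{
	struct entry *e, *new;

	pthread_mutex_lock(&lock);
	e = find_locked(key);
	pthread_mutex_unlock(&lock);
	if (e)
		return e;

	/* Allocate outside the lock, like kzalloc(..., GFP_KERNEL) in the patch. */
	new = calloc(1, sizeof(*new));
	if (!new)
		return NULL;
	new->key = key;

	pthread_mutex_lock(&lock);
	e = find_locked(key);	/* was one inserted after the previous search? */
	if (e) {
		pthread_mutex_unlock(&lock);
		free(new);	/* lost the race; reuse the existing entry */
		return e;
	}
	new->next = head;
	head = new;
	pthread_mutex_unlock(&lock);
	return new;
}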
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ae82159cf7fa..06c3e83fa387 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -146,12 +146,7 @@ client_can_cache:
 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
 					 xid, NULL);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	return rc;
 }
@@ -253,12 +248,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
 	list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
 	spin_unlock(&cifs_file_list_lock);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	file->private_data = pCifsFile;
 	return pCifsFile;
@@ -271,8 +261,9 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
+	struct inode *inode = cifs_file->dentry->d_inode;
 	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
-	struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 	struct cifsLockInfo *li, *tmp;
 
 	spin_lock(&cifs_file_list_lock);
@@ -288,8 +279,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 	if (list_empty(&cifsi->openFileList)) {
 		cFYI(1, "closing last open instance for inode %p",
 			cifs_file->dentry->d_inode);
-		cifsi->clientCanCacheRead = false;
-		cifsi->clientCanCacheAll  = false;
+		cifs_set_oplock_level(cifsi, 0);
 	}
 	spin_unlock(&cifs_file_list_lock);
 
@@ -607,8 +597,6 @@ reopen_success:
 	rc = filemap_write_and_wait(inode->i_mapping);
 	mapping_set_error(inode->i_mapping, rc);
 
-	pCifsInode->clientCanCacheAll = false;
-	pCifsInode->clientCanCacheRead = false;
 	if (tcon->unix_ext)
 		rc = cifs_get_inode_info_unix(&inode,
 			full_path, inode->i_sb, xid);
@@ -622,18 +610,9 @@ reopen_success:
 		invalidate the current end of file on the server
 		we can not go to the server to get the new inod
 		info */
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p",
-			 pCifsFile->dentry->d_inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ) {
-		pCifsInode->clientCanCacheRead = true;
-		pCifsInode->clientCanCacheAll = false;
-	} else {
-		pCifsInode->clientCanCacheRead = false;
-		pCifsInode->clientCanCacheAll = false;
-	}
+
+	cifs_set_oplock_level(pCifsInode, oplock);
+
 	cifs_relock_file(pCifsFile);
 
 reopen_error_exit:
@@ -775,12 +754,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
-
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
 	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
 
 	if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -956,6 +929,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 	size_t write_size, loff_t *poffset)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	int rc = 0;
 	unsigned int bytes_written = 0;
 	unsigned int total_written;
@@ -963,7 +937,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 	struct cifsTconInfo *pTcon;
 	int xid, long_op;
 	struct cifsFileInfo *open_file;
-	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
@@ -1029,21 +1003,17 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 
 	cifs_stats_bytes_written(pTcon, total_written);
 
-	/* since the write may have blocked check these pointers again */
-	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
-		struct inode *inode = file->f_path.dentry->d_inode;
 /* Do not update local mtime - server will set its actual value on write
  * inode->i_ctime = inode->i_mtime =
  * current_fs_time(inode->i_sb);*/
 	if (total_written > 0) {
 		spin_lock(&inode->i_lock);
-		if (*poffset > file->f_path.dentry->d_inode->i_size)
-			i_size_write(file->f_path.dentry->d_inode,
-					*poffset);
+		if (*poffset > inode->i_size)
+			i_size_write(inode, *poffset);
 		spin_unlock(&inode->i_lock);
-		}
-		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
 	}
+	mark_inode_dirty_sync(inode);
+
 	FreeXid(xid);
 	return total_written;
 }
@@ -1178,7 +1148,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 					bool fsuid_only)
 {
 	struct cifsFileInfo *open_file;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+	struct cifs_sb_info *cifs_sb;
 	bool any_available = false;
 	int rc;
 
@@ -1192,6 +1162,8 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 		return NULL;
 	}
 
+	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 39869c3c3efb..ef3a55bf86b6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2177,7 +2177,6 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
 
 	setattr_copy(inode, attrs);
 	mark_inode_dirty(inode);
-	return 0;
 
 cifs_setattr_exit:
 	kfree(full_path);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 077bf756f342..0c98672d0122 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,10 +38,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 	struct cifs_sb_info *cifs_sb;
 #ifdef CONFIG_CIFS_POSIX
 	struct cifsFileInfo *pSMBFile = filep->private_data;
-	struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink);
+	struct cifsTconInfo *tcon;
 	__u64	ExtAttrBits = 0;
 	__u64   ExtAttrMask = 0;
-	__u64   caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+	__u64   caps;
 #endif /* CONFIG_CIFS_POSIX */
 
 	xid = GetXid();
@@ -62,9 +62,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 			break;
 #ifdef CONFIG_CIFS_POSIX
 		case FS_IOC_GETFLAGS:
+			if (pSMBFile == NULL)
+				break;
+			tcon = tlink_tcon(pSMBFile->tlink);
+			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
-				if (pSMBFile == NULL)
-					break;
 				rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
 					&ExtAttrBits, &ExtAttrMask);
 				if (rc == 0)
@@ -75,13 +77,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 			break;
 
 		case FS_IOC_SETFLAGS:
+			if (pSMBFile == NULL)
+				break;
+			tcon = tlink_tcon(pSMBFile->tlink);
+			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
 				if (get_user(ExtAttrBits, (int __user *)arg)) {
 					rc = -EFAULT;
 					break;
 				}
-				if (pSMBFile == NULL)
-					break;
 				/* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
 					extAttrBits, &ExtAttrMask);*/
 			}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c4e296fe3518..43f10281bc19 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -569,10 +569,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 
 			cFYI(1, "file id match, oplock break");
 			pCifsInode = CIFS_I(netfile->dentry->d_inode);
-			pCifsInode->clientCanCacheAll = false;
-			if (pSMB->OplockLevel == 0)
-				pCifsInode->clientCanCacheRead = false;
 
+			cifs_set_oplock_level(pCifsInode,
+					      pSMB->OplockLevel);
 			/*
 			 * cifs_oplock_break_put() can't be called
 			 * from here. Get reference after queueing
@@ -722,3 +721,23 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
 			   cifs_sb_master_tcon(cifs_sb)->treeName);
 	}
 }
+
+void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
+{
+	oplock &= 0xF;
+
+	if (oplock == OPLOCK_EXCLUSIVE) {
+		cinode->clientCanCacheAll = true;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Exclusive Oplock granted on inode %p",
+		     &cinode->vfs_inode);
+	} else if (oplock == OPLOCK_READ) {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Level II Oplock granted on inode %p",
+		     &cinode->vfs_inode);
+	} else {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = false;
+	}
+}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8b5dd6369f82..6a5edea2d70b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -177,7 +177,7 @@ struct mpage_da_data {
 
 struct ext4_io_page {
 	struct page	*p_page;
-	int		p_count;
+	atomic_t	p_count;
 };
 
 #define MAX_IO_PAGES 128
@@ -858,6 +858,7 @@ struct ext4_inode_info {
 	spinlock_t i_completed_io_lock;
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
+	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 
 	/*
 	 * Transactions that contain inode's metadata needed to complete
@@ -2060,6 +2061,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
 extern void ext4_exit_pageio(void);
+extern void ext4_ioend_wait(struct inode *);
 extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
 extern int ext4_end_io_nolock(ext4_io_end_t *io);
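Illustrative aside (not part of the patch series): turning ext4_io_page.p_count into an atomic_t lets whichever path drops the final reference do the cleanup without holding an external lock, which is how put_io_page() behaves in the page-io.c hunks below. A hedged userspace sketch of the same "free on the final put" idea using C11 atomics; all names here are invented for the example.

/* Sketch only: last reference dropper frees the object. */
#include <stdatomic.h>
#include <stdlib.h>

struct io_page {
	void *page;
	atomic_int count;	/* starts at 1 for the submitter's reference */
};

static struct io_page *io_page_alloc(void *page)
{
	struct io_page *ip = malloc(sizeof(*ip));

	if (ip) {
		ip->page = page;
		atomic_init(&ip->count, 1);
	}
	return ip;
}

static void io_page_get(struct io_page *ip)
{
	atomic_fetch_add(&ip->count, 1);
}

/* Mirrors the idea behind put_io_page(): the final put frees. */
static void io_page_put(struct io_page *ip)
{
	if (atomic_fetch_sub(&ip->count, 1) == 1)
		free(ip);
}

int main(void)
{
	struct io_page *ip = io_page_alloc((void *)0);

	io_page_get(ip);	/* e.g. a submission path takes a reference */
	io_page_put(ip);	/* I/O completion drops it ... */
	io_page_put(ip);	/* ... and the final put frees */
	return 0;
}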
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 191616470466..bdbe69902207 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -53,6 +53,7 @@
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
 					      loff_t new_size)
 {
+	trace_ext4_begin_ordered_truncate(inode, new_size);
 	return jbd2_journal_begin_ordered_truncate(
 			EXT4_SB(inode->i_sb)->s_journal,
 			&EXT4_I(inode)->jinode,
@@ -178,6 +179,7 @@ void ext4_evict_inode(struct inode *inode)
 	handle_t *handle;
 	int err;
 
+	trace_ext4_evict_inode(inode);
 	if (inode->i_nlink) {
 		truncate_inode_pages(&inode->i_data, 0);
 		goto no_delete;
@@ -5410,9 +5412,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	 * will return the blocks that include the delayed allocation
 	 * blocks for this file.
 	 */
-	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
-	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
 	return 0;
@@ -5649,6 +5649,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 	int err, ret;
 
 	might_sleep();
+	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (ext4_handle_valid(handle) &&
 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c58eba34724a..5b4d4e3a4d58 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4640,8 +4640,6 @@ do_more:
 	 * with group lock held. generate_buddy look at
 	 * them with group lock_held
 	 */
-	if (test_opt(sb, DISCARD))
-		ext4_issue_discard(sb, block_group, bit, count);
 	ext4_lock_group(sb, block_group);
 	mb_clear_bits(bitmap_bh->b_data, bit, count);
 	mb_free_blocks(inode, &e4b, bit, count);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 46a7d6a9d976..7f5451cd1d38 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -32,8 +32,14 @@
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
+#define WQ_HASH_SZ		37
+#define to_ioend_wq(v)	(&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
+static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
+
 int __init ext4_init_pageio(void)
 {
+	int i;
+
 	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
 	if (io_page_cachep == NULL)
 		return -ENOMEM;
@@ -42,6 +48,8 @@ int __init ext4_init_pageio(void)
 		kmem_cache_destroy(io_page_cachep);
 		return -ENOMEM;
 	}
+	for (i = 0; i < WQ_HASH_SZ; i++)
+		init_waitqueue_head(&ioend_wq[i]);
 
 	return 0;
 }
@@ -52,24 +60,37 @@ void ext4_exit_pageio(void)
 	kmem_cache_destroy(io_page_cachep);
 }
 
+void ext4_ioend_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = to_ioend_wq(inode);
+
+	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
+}
+
+static void put_io_page(struct ext4_io_page *io_page)
+{
+	if (atomic_dec_and_test(&io_page->p_count)) {
+		end_page_writeback(io_page->p_page);
+		put_page(io_page->p_page);
+		kmem_cache_free(io_page_cachep, io_page);
+	}
+}
+
 void ext4_free_io_end(ext4_io_end_t *io)
 {
 	int i;
+	wait_queue_head_t *wq;
 
 	BUG_ON(!io);
 	if (io->page)
 		put_page(io->page);
-	for (i = 0; i < io->num_io_pages; i++) {
-		if (--io->pages[i]->p_count == 0) {
-			struct page *page = io->pages[i]->p_page;
-
-			end_page_writeback(page);
-			put_page(page);
-			kmem_cache_free(io_page_cachep, io->pages[i]);
-		}
-	}
+	for (i = 0; i < io->num_io_pages; i++)
+		put_io_page(io->pages[i]);
 	io->num_io_pages = 0;
-	iput(io->inode);
+	wq = to_ioend_wq(io->inode);
+	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
+	    waitqueue_active(wq))
+		wake_up_all(wq);
 	kmem_cache_free(io_end_cachep, io);
 }
 
@@ -142,8 +163,8 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
 	io = kmem_cache_alloc(io_end_cachep, flags);
 	if (io) {
 		memset(io, 0, sizeof(*io));
-		io->inode = igrab(inode);
-		BUG_ON(!io->inode);
+		atomic_inc(&EXT4_I(inode)->i_ioend_count);
+		io->inode = inode;
 		INIT_WORK(&io->work, ext4_end_io_work);
 		INIT_LIST_HEAD(&io->list);
 	}
@@ -171,35 +192,15 @@ static void ext4_end_bio(struct bio *bio, int error)
 	struct workqueue_struct *wq;
 	struct inode *inode;
 	unsigned long flags;
-	ext4_fsblk_t err_block;
 	int i;
 
 	BUG_ON(!io_end);
-	inode = io_end->inode;
 	bio->bi_private = NULL;
 	bio->bi_end_io = NULL;
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = 0;
-	err_block = bio->bi_sector >> (inode->i_blkbits - 9);
 	bio_put(bio);
 
-	if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
-		pr_err("sb umounted, discard end_io request for inode %lu\n",
-			   io_end->inode->i_ino);
-		ext4_free_io_end(io_end);
-		return;
-	}
-
-	if (error) {
-		io_end->flag |= EXT4_IO_END_ERROR;
-		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
-			     "(offset %llu size %ld starting block %llu)",
-			     inode->i_ino,
-			     (unsigned long long) io_end->offset,
-			     (long) io_end->size,
-			     (unsigned long long) err_block);
-	}
-
 	for (i = 0; i < io_end->num_io_pages; i++) {
 		struct page *page = io_end->pages[i]->p_page;
 		struct buffer_head *bh, *head;
@@ -236,13 +237,7 @@ static void ext4_end_bio(struct bio *bio, int error)
 		} while (bh != head);
 	}
 
-	if (--io_end->pages[i]->p_count == 0) {
-		struct page *page = io_end->pages[i]->p_page;
-
-		end_page_writeback(page);
-		put_page(page);
-		kmem_cache_free(io_page_cachep, io_end->pages[i]);
-	}
+	put_io_page(io_end->pages[i]);
 
 	/*
 	 * If this is a partial write which happened to make
@@ -254,8 +249,19 @@ static void ext4_end_bio(struct bio *bio, int error)
 		if (!partial_write)
 			SetPageUptodate(page);
 	}
-
 	io_end->num_io_pages = 0;
+	inode = io_end->inode;
+
+	if (error) {
+		io_end->flag |= EXT4_IO_END_ERROR;
+		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+			     "(offset %llu size %ld starting block %llu)",
+			     inode->i_ino,
+			     (unsigned long long) io_end->offset,
+			     (long) io_end->size,
+			     (unsigned long long)
+			     bio->bi_sector >> (inode->i_blkbits - 9));
+	}
 
 	/* Add the io_end to per-inode completed io list*/
 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
@@ -305,7 +311,6 @@ static int io_submit_init(struct ext4_io_submit *io,
 	bio->bi_private = io->io_end = io_end;
 	bio->bi_end_io = ext4_end_bio;
 
-	io_end->inode = inode;
 	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
 
 	io->io_bio = bio;
@@ -360,7 +365,7 @@ submit_and_retry:
 	if ((io_end->num_io_pages == 0) ||
 	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
 		io_end->pages[io_end->num_io_pages++] = io_page;
-		io_page->p_count++;
+		atomic_inc(&io_page->p_count);
 	}
 	return 0;
 }
@@ -389,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		return -ENOMEM;
 	}
 	io_page->p_page = page;
-	io_page->p_count = 0;
+	atomic_set(&io_page->p_count, 1);
 	get_page(page);
 
 	for (bh = head = page_buffers(page), block_start = 0;
@@ -421,10 +426,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	 * PageWriteback bit from the page to prevent the system from
 	 * wedging later on.
 	 */
-	if (io_page->p_count == 0) {
-		put_page(page);
-		end_page_writeback(page);
-		kmem_cache_free(io_page_cachep, io_page);
-	}
+	put_io_page(io_page);
 	return ret;
 }
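Illustrative aside (not part of the patch series): the page-io.c changes above replace igrab()/iput() on the inode with a per-inode counter of outstanding io_end structures plus a small hash of wait queues, so ext4_destroy_inode() can wait for in-flight writeback without pinning the inode. Below is a userspace sketch of that hashed-waitqueue idea using pthreads; it uses a mutex-protected counter where the kernel code uses an atomic counter with waitqueue_active()/wake_up_all(), and every name here is invented for the example.

/* Sketch only: wait until an object's outstanding-I/O count drops to zero. */
#include <pthread.h>

#define WQ_HASH_SZ 37

struct wq_bucket {
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static struct wq_bucket wq[WQ_HASH_SZ];

/* Pick a bucket by hashing the object pointer, like to_ioend_wq(). */
static struct wq_bucket *to_wq(const void *obj)
{
	return &wq[(unsigned long)obj % WQ_HASH_SZ];
}

/* Call once at startup, like the loop added to ext4_init_pageio(). */
void wq_init(void)
{
	int i;

	for (i = 0; i < WQ_HASH_SZ; i++) {
		pthread_mutex_init(&wq[i].lock, NULL);
		pthread_cond_init(&wq[i].cond, NULL);
	}
}

struct object {
	int outstanding;	/* analogous to EXT4_I(inode)->i_ioend_count */
};

/* Submission path: account one more outstanding I/O for obj. */
void object_io_submit(struct object *obj)
{
	struct wq_bucket *b = to_wq(obj);

	pthread_mutex_lock(&b->lock);
	obj->outstanding++;
	pthread_mutex_unlock(&b->lock);
}

/* Completion path: drop one outstanding I/O, wake waiters on the last one. */
void object_io_done(struct object *obj)
{
	struct wq_bucket *b = to_wq(obj);

	pthread_mutex_lock(&b->lock);
	if (--obj->outstanding == 0)
		pthread_cond_broadcast(&b->cond);
	pthread_mutex_unlock(&b->lock);
}

/* Teardown path: like ext4_ioend_wait(), block until nothing is in flight. */
void object_wait_idle(struct object *obj)
{
	struct wq_bucket *b = to_wq(obj);

	pthread_mutex_lock(&b->lock);
	while (obj->outstanding != 0)	/* loop guards against spurious wakeups */
		pthread_cond_wait(&b->cond, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct object obj = { 0 };

	wq_init();
	object_io_submit(&obj);	/* writeback starts an I/O */
	object_io_done(&obj);	/* completion path finishes it */
	object_wait_idle(&obj);	/* teardown sees nothing outstanding */
	return 0;
}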
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 40131b777af6..61182fe6254e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -828,12 +828,22 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
| 828 | ei->cur_aio_dio = NULL; | 828 | ei->cur_aio_dio = NULL; |
| 829 | ei->i_sync_tid = 0; | 829 | ei->i_sync_tid = 0; |
| 830 | ei->i_datasync_tid = 0; | 830 | ei->i_datasync_tid = 0; |
| 831 | atomic_set(&ei->i_ioend_count, 0); | ||
| 831 | 832 | ||
| 832 | return &ei->vfs_inode; | 833 | return &ei->vfs_inode; |
| 833 | } | 834 | } |
| 834 | 835 | ||
| 836 | static int ext4_drop_inode(struct inode *inode) | ||
| 837 | { | ||
| 838 | int drop = generic_drop_inode(inode); | ||
| 839 | |||
| 840 | trace_ext4_drop_inode(inode, drop); | ||
| 841 | return drop; | ||
| 842 | } | ||
| 843 | |||
| 835 | static void ext4_destroy_inode(struct inode *inode) | 844 | static void ext4_destroy_inode(struct inode *inode) |
| 836 | { | 845 | { |
| 846 | ext4_ioend_wait(inode); | ||
| 837 | if (!list_empty(&(EXT4_I(inode)->i_orphan))) { | 847 | if (!list_empty(&(EXT4_I(inode)->i_orphan))) { |
| 838 | ext4_msg(inode->i_sb, KERN_ERR, | 848 | ext4_msg(inode->i_sb, KERN_ERR, |
| 839 | "Inode %lu (%p): orphan list check failed!", | 849 | "Inode %lu (%p): orphan list check failed!", |
| @@ -1173,6 +1183,7 @@ static const struct super_operations ext4_sops = { | |||
| 1173 | .destroy_inode = ext4_destroy_inode, | 1183 | .destroy_inode = ext4_destroy_inode, |
| 1174 | .write_inode = ext4_write_inode, | 1184 | .write_inode = ext4_write_inode, |
| 1175 | .dirty_inode = ext4_dirty_inode, | 1185 | .dirty_inode = ext4_dirty_inode, |
| 1186 | .drop_inode = ext4_drop_inode, | ||
| 1176 | .evict_inode = ext4_evict_inode, | 1187 | .evict_inode = ext4_evict_inode, |
| 1177 | .put_super = ext4_put_super, | 1188 | .put_super = ext4_put_super, |
| 1178 | .sync_fs = ext4_sync_fs, | 1189 | .sync_fs = ext4_sync_fs, |
| @@ -1194,6 +1205,7 @@ static const struct super_operations ext4_nojournal_sops = { | |||
| 1194 | .destroy_inode = ext4_destroy_inode, | 1205 | .destroy_inode = ext4_destroy_inode, |
| 1195 | .write_inode = ext4_write_inode, | 1206 | .write_inode = ext4_write_inode, |
| 1196 | .dirty_inode = ext4_dirty_inode, | 1207 | .dirty_inode = ext4_dirty_inode, |
| 1208 | .drop_inode = ext4_drop_inode, | ||
| 1197 | .evict_inode = ext4_evict_inode, | 1209 | .evict_inode = ext4_evict_inode, |
| 1198 | .write_super = ext4_write_super, | 1210 | .write_super = ext4_write_super, |
| 1199 | .put_super = ext4_put_super, | 1211 | .put_super = ext4_put_super, |
| @@ -2699,7 +2711,6 @@ static int ext4_lazyinit_thread(void *arg) | |||
| 2699 | struct ext4_li_request *elr; | 2711 | struct ext4_li_request *elr; |
| 2700 | unsigned long next_wakeup; | 2712 | unsigned long next_wakeup; |
| 2701 | DEFINE_WAIT(wait); | 2713 | DEFINE_WAIT(wait); |
| 2702 | int ret; | ||
| 2703 | 2714 | ||
| 2704 | BUG_ON(NULL == eli); | 2715 | BUG_ON(NULL == eli); |
| 2705 | 2716 | ||
| @@ -2723,13 +2734,12 @@ cont_thread: | |||
| 2723 | elr = list_entry(pos, struct ext4_li_request, | 2734 | elr = list_entry(pos, struct ext4_li_request, |
| 2724 | lr_request); | 2735 | lr_request); |
| 2725 | 2736 | ||
| 2726 | if (time_after_eq(jiffies, elr->lr_next_sched)) | 2737 | if (time_after_eq(jiffies, elr->lr_next_sched)) { |
| 2727 | ret = ext4_run_li_request(elr); | 2738 | if (ext4_run_li_request(elr) != 0) { |
| 2728 | 2739 | /* error, remove the lazy_init job */ | |
| 2729 | if (ret) { | 2740 | ext4_remove_li_request(elr); |
| 2730 | ret = 0; | 2741 | continue; |
| 2731 | ext4_remove_li_request(elr); | 2742 | } |
| 2732 | continue; | ||
| 2733 | } | 2743 | } |
| 2734 | 2744 | ||
| 2735 | if (time_before(elr->lr_next_sched, next_wakeup)) | 2745 | if (time_before(elr->lr_next_sched, next_wakeup)) |
| @@ -2740,7 +2750,8 @@ cont_thread: | |||
| 2740 | if (freezing(current)) | 2750 | if (freezing(current)) |
| 2741 | refrigerator(); | 2751 | refrigerator(); |
| 2742 | 2752 | ||
| 2743 | if (time_after_eq(jiffies, next_wakeup)) { | 2753 | if ((time_after_eq(jiffies, next_wakeup)) || |
| 2754 | (MAX_JIFFY_OFFSET == next_wakeup)) { | ||
| 2744 | cond_resched(); | 2755 | cond_resched(); |
| 2745 | continue; | 2756 | continue; |
| 2746 | } | 2757 | } |
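Two things change in the lazyinit thread: a failed ext4_run_li_request() is now handled inside the same time check, and the sleep is skipped when next_wakeup still holds MAX_JIFFY_OFFSET, the sentinel meaning no request set a wakeup time. Note that the scheduling decisions use the wrap-safe jiffies macros rather than raw comparisons; a minimal standalone illustration of that pattern (the helper names here are hypothetical):

#include <linux/types.h>
#include <linux/jiffies.h>

/* Wrap-safe deadline test: correct even if jiffies overflows between
 * computing the deadline and checking it. */
static bool li_deadline_passed(unsigned long deadline)
{
	return time_after_eq(jiffies, deadline);
}

/* Producer side: schedule the next run roughly ten seconds out. */
static unsigned long li_next_deadline(void)
{
	return jiffies + 10 * HZ;
}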
| @@ -3348,6 +3359,24 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 3348 | get_random_bytes(&sbi->s_next_generation, sizeof(u32)); | 3359 | get_random_bytes(&sbi->s_next_generation, sizeof(u32)); |
| 3349 | spin_lock_init(&sbi->s_next_gen_lock); | 3360 | spin_lock_init(&sbi->s_next_gen_lock); |
| 3350 | 3361 | ||
| 3362 | err = percpu_counter_init(&sbi->s_freeblocks_counter, | ||
| 3363 | ext4_count_free_blocks(sb)); | ||
| 3364 | if (!err) { | ||
| 3365 | err = percpu_counter_init(&sbi->s_freeinodes_counter, | ||
| 3366 | ext4_count_free_inodes(sb)); | ||
| 3367 | } | ||
| 3368 | if (!err) { | ||
| 3369 | err = percpu_counter_init(&sbi->s_dirs_counter, | ||
| 3370 | ext4_count_dirs(sb)); | ||
| 3371 | } | ||
| 3372 | if (!err) { | ||
| 3373 | err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0); | ||
| 3374 | } | ||
| 3375 | if (err) { | ||
| 3376 | ext4_msg(sb, KERN_ERR, "insufficient memory"); | ||
| 3377 | goto failed_mount3; | ||
| 3378 | } | ||
| 3379 | |||
| 3351 | sbi->s_stripe = ext4_get_stripe_size(sbi); | 3380 | sbi->s_stripe = ext4_get_stripe_size(sbi); |
| 3352 | sbi->s_max_writeback_mb_bump = 128; | 3381 | sbi->s_max_writeback_mb_bump = 128; |
| 3353 | 3382 | ||
| @@ -3446,22 +3475,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 3446 | } | 3475 | } |
| 3447 | set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); | 3476 | set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); |
| 3448 | 3477 | ||
| 3449 | no_journal: | 3478 | /* |
| 3450 | err = percpu_counter_init(&sbi->s_freeblocks_counter, | 3479 | * The journal may have updated the bg summary counts, so we |
| 3451 | ext4_count_free_blocks(sb)); | 3480 | * need to update the global counters. |
| 3452 | if (!err) | 3481 | */ |
| 3453 | err = percpu_counter_init(&sbi->s_freeinodes_counter, | 3482 | percpu_counter_set(&sbi->s_freeblocks_counter, |
| 3454 | ext4_count_free_inodes(sb)); | 3483 | ext4_count_free_blocks(sb)); |
| 3455 | if (!err) | 3484 | percpu_counter_set(&sbi->s_freeinodes_counter, |
| 3456 | err = percpu_counter_init(&sbi->s_dirs_counter, | 3485 | ext4_count_free_inodes(sb)); |
| 3457 | ext4_count_dirs(sb)); | 3486 | percpu_counter_set(&sbi->s_dirs_counter, |
| 3458 | if (!err) | 3487 | ext4_count_dirs(sb)); |
| 3459 | err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0); | 3488 | percpu_counter_set(&sbi->s_dirtyblocks_counter, 0); |
| 3460 | if (err) { | ||
| 3461 | ext4_msg(sb, KERN_ERR, "insufficient memory"); | ||
| 3462 | goto failed_mount_wq; | ||
| 3463 | } | ||
| 3464 | 3489 | ||
| 3490 | no_journal: | ||
| 3465 | EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); | 3491 | EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); |
| 3466 | if (!EXT4_SB(sb)->dio_unwritten_wq) { | 3492 | if (!EXT4_SB(sb)->dio_unwritten_wq) { |
| 3467 | printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); | 3493 | printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); |
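The free-block, free-inode, dirs and dirty-block counters are now brought up before the journal is loaded, and once replay may have changed the block-group summaries they are merely re-seeded with percpu_counter_set() instead of being initialised a second time. A compact sketch of the percpu_counter lifecycle this relies on, using the two-argument init form that appears in this diff (later kernels also take a gfp_t):

#include <linux/percpu_counter.h>

static struct percpu_counter free_blocks;

static int counter_lifecycle_sketch(s64 initial)
{
	int err = percpu_counter_init(&free_blocks, initial);
	if (err)
		return err;				/* per-CPU allocation failed */

	percpu_counter_add(&free_blocks, 16);		/* cheap per-CPU fast path */
	percpu_counter_set(&free_blocks, initial);	/* re-seed, e.g. after journal replay */
	(void)percpu_counter_sum_positive(&free_blocks);/* exact, clamped-at-zero total */

	percpu_counter_destroy(&free_blocks);
	return 0;
}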
| @@ -3611,10 +3637,6 @@ failed_mount_wq: | |||
| 3611 | jbd2_journal_destroy(sbi->s_journal); | 3637 | jbd2_journal_destroy(sbi->s_journal); |
| 3612 | sbi->s_journal = NULL; | 3638 | sbi->s_journal = NULL; |
| 3613 | } | 3639 | } |
| 3614 | percpu_counter_destroy(&sbi->s_freeblocks_counter); | ||
| 3615 | percpu_counter_destroy(&sbi->s_freeinodes_counter); | ||
| 3616 | percpu_counter_destroy(&sbi->s_dirs_counter); | ||
| 3617 | percpu_counter_destroy(&sbi->s_dirtyblocks_counter); | ||
| 3618 | failed_mount3: | 3640 | failed_mount3: |
| 3619 | if (sbi->s_flex_groups) { | 3641 | if (sbi->s_flex_groups) { |
| 3620 | if (is_vmalloc_addr(sbi->s_flex_groups)) | 3642 | if (is_vmalloc_addr(sbi->s_flex_groups)) |
| @@ -3622,6 +3644,10 @@ failed_mount3: | |||
| 3622 | else | 3644 | else |
| 3623 | kfree(sbi->s_flex_groups); | 3645 | kfree(sbi->s_flex_groups); |
| 3624 | } | 3646 | } |
| 3647 | percpu_counter_destroy(&sbi->s_freeblocks_counter); | ||
| 3648 | percpu_counter_destroy(&sbi->s_freeinodes_counter); | ||
| 3649 | percpu_counter_destroy(&sbi->s_dirs_counter); | ||
| 3650 | percpu_counter_destroy(&sbi->s_dirtyblocks_counter); | ||
| 3625 | failed_mount2: | 3651 | failed_mount2: |
| 3626 | for (i = 0; i < db_count; i++) | 3652 | for (i = 0; i < db_count; i++) |
| 3627 | brelse(sbi->s_group_desc[i]); | 3653 | brelse(sbi->s_group_desc[i]); |
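Because the counters are now created earlier in ext4_fill_super(), their teardown moves down the unwind ladder as well: percpu_counter_destroy() runs under failed_mount3, which all the later failure paths fall through to, rather than under failed_mount_wq. The rule at work is the usual one for goto-based cleanup, sketched here as a standalone example (the acquire/release helpers are made up):

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail)
		return -1;
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

/* Each label undoes only what was already set up when the jump happens,
 * in the reverse order of acquisition. */
static int fill_sketch(void)
{
	int err;

	if ((err = acquire("counters", 0)))
		goto out;
	if ((err = acquire("journal", 0)))
		goto free_counters;
	if ((err = acquire("workqueue", 1)))	/* simulate a late failure */
		goto free_journal;
	return 0;

free_journal:
	release("journal");
free_counters:
	release("counters");
out:
	return err;
}

int main(void)
{
	return fill_sketch() ? 1 : 0;
}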
| @@ -3949,13 +3975,11 @@ static int ext4_commit_super(struct super_block *sb, int sync) | |||
| 3949 | else | 3975 | else |
| 3950 | es->s_kbytes_written = | 3976 | es->s_kbytes_written = |
| 3951 | cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); | 3977 | cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); |
| 3952 | if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter)) | 3978 | ext4_free_blocks_count_set(es, percpu_counter_sum_positive( |
| 3953 | ext4_free_blocks_count_set(es, percpu_counter_sum_positive( | 3979 | &EXT4_SB(sb)->s_freeblocks_counter)); |
| 3954 | &EXT4_SB(sb)->s_freeblocks_counter)); | 3980 | es->s_free_inodes_count = |
| 3955 | if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter)) | 3981 | cpu_to_le32(percpu_counter_sum_positive( |
| 3956 | es->s_free_inodes_count = | 3982 | &EXT4_SB(sb)->s_freeinodes_counter)); |
| 3957 | cpu_to_le32(percpu_counter_sum_positive( | ||
| 3958 | &EXT4_SB(sb)->s_freeinodes_counter)); | ||
| 3959 | sb->s_dirt = 0; | 3983 | sb->s_dirt = 0; |
| 3960 | BUFFER_TRACE(sbh, "marking dirty"); | 3984 | BUFFER_TRACE(sbh, "marking dirty"); |
| 3961 | mark_buffer_dirty(sbh); | 3985 | mark_buffer_dirty(sbh); |
| @@ -4556,12 +4580,10 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, | |||
| 4556 | 4580 | ||
| 4557 | static int ext4_quota_off(struct super_block *sb, int type) | 4581 | static int ext4_quota_off(struct super_block *sb, int type) |
| 4558 | { | 4582 | { |
| 4559 | /* Force all delayed allocation blocks to be allocated */ | 4583 | /* Force all delayed allocation blocks to be allocated. |
| 4560 | if (test_opt(sb, DELALLOC)) { | 4584 | * Caller already holds s_umount sem */ |
| 4561 | down_read(&sb->s_umount); | 4585 | if (test_opt(sb, DELALLOC)) |
| 4562 | sync_filesystem(sb); | 4586 | sync_filesystem(sb); |
| 4563 | up_read(&sb->s_umount); | ||
| 4564 | } | ||
| 4565 | 4587 | ||
| 4566 | return dquot_quota_off(sb, type); | 4588 | return dquot_quota_off(sb, type); |
| 4567 | } | 4589 | } |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d6cfac1f0a40..a5fe68189eed 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
| @@ -932,8 +932,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, | |||
| 932 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { | 932 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { |
| 933 | *user = current_user(); | 933 | *user = current_user(); |
| 934 | if (user_shm_lock(size, *user)) { | 934 | if (user_shm_lock(size, *user)) { |
| 935 | WARN_ONCE(1, | 935 | printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n"); |
| 936 | "Using mlock ulimits for SHM_HUGETLB deprecated\n"); | ||
| 937 | } else { | 936 | } else { |
| 938 | *user = NULL; | 937 | *user = NULL; |
| 939 | return ERR_PTR(-EPERM); | 938 | return ERR_PTR(-EPERM); |
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 748cfb92dcc6..2f7d05c89922 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
| @@ -111,12 +111,14 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) | |||
| 111 | read_lock(&tasklist_lock); | 111 | read_lock(&tasklist_lock); |
| 112 | switch (which) { | 112 | switch (which) { |
| 113 | case IOPRIO_WHO_PROCESS: | 113 | case IOPRIO_WHO_PROCESS: |
| 114 | rcu_read_lock(); | ||
| 114 | if (!who) | 115 | if (!who) |
| 115 | p = current; | 116 | p = current; |
| 116 | else | 117 | else |
| 117 | p = find_task_by_vpid(who); | 118 | p = find_task_by_vpid(who); |
| 118 | if (p) | 119 | if (p) |
| 119 | ret = set_task_ioprio(p, ioprio); | 120 | ret = set_task_ioprio(p, ioprio); |
| 121 | rcu_read_unlock(); | ||
| 120 | break; | 122 | break; |
| 121 | case IOPRIO_WHO_PGRP: | 123 | case IOPRIO_WHO_PGRP: |
| 122 | if (!who) | 124 | if (!who) |
| @@ -139,7 +141,12 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) | |||
| 139 | break; | 141 | break; |
| 140 | 142 | ||
| 141 | do_each_thread(g, p) { | 143 | do_each_thread(g, p) { |
| 142 | if (__task_cred(p)->uid != who) | 144 | int match; |
| 145 | |||
| 146 | rcu_read_lock(); | ||
| 147 | match = __task_cred(p)->uid == who; | ||
| 148 | rcu_read_unlock(); | ||
| 149 | if (!match) | ||
| 143 | continue; | 150 | continue; |
| 144 | ret = set_task_ioprio(p, ioprio); | 151 | ret = set_task_ioprio(p, ioprio); |
| 145 | if (ret) | 152 | if (ret) |
| @@ -200,12 +207,14 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) | |||
| 200 | read_lock(&tasklist_lock); | 207 | read_lock(&tasklist_lock); |
| 201 | switch (which) { | 208 | switch (which) { |
| 202 | case IOPRIO_WHO_PROCESS: | 209 | case IOPRIO_WHO_PROCESS: |
| 210 | rcu_read_lock(); | ||
| 203 | if (!who) | 211 | if (!who) |
| 204 | p = current; | 212 | p = current; |
| 205 | else | 213 | else |
| 206 | p = find_task_by_vpid(who); | 214 | p = find_task_by_vpid(who); |
| 207 | if (p) | 215 | if (p) |
| 208 | ret = get_task_ioprio(p); | 216 | ret = get_task_ioprio(p); |
| 217 | rcu_read_unlock(); | ||
| 209 | break; | 218 | break; |
| 210 | case IOPRIO_WHO_PGRP: | 219 | case IOPRIO_WHO_PGRP: |
| 211 | if (!who) | 220 | if (!who) |
| @@ -232,7 +241,12 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) | |||
| 232 | break; | 241 | break; |
| 233 | 242 | ||
| 234 | do_each_thread(g, p) { | 243 | do_each_thread(g, p) { |
| 235 | if (__task_cred(p)->uid != user->uid) | 244 | int match; |
| 245 | |||
| 246 | rcu_read_lock(); | ||
| 247 | match = __task_cred(p)->uid == user->uid; | ||
| 248 | rcu_read_unlock(); | ||
| 249 | if (!match) | ||
| 236 | continue; | 250 | continue; |
| 237 | tmpio = get_task_ioprio(p); | 251 | tmpio = get_task_ioprio(p); |
| 238 | if (tmpio < 0) | 252 | if (tmpio < 0) |
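The ioprio hunks wrap every task and credential lookup in rcu_read_lock()/rcu_read_unlock(): find_task_by_vpid() returns a task pointer that is only guaranteed to stay valid inside an RCU read-side section, and __task_cred() dereferences the RCU-managed cred pointer. A small sketch of the rule they enforce, copying the needed value out before leaving the read section:

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/rcupdate.h>

/* Compare a task's uid under RCU; the cred structure may be replaced
 * concurrently and must not be touched after rcu_read_unlock(). */
static int task_uid_matches(struct task_struct *p, uid_t who)
{
	uid_t task_uid;

	rcu_read_lock();
	task_uid = __task_cred(p)->uid;
	rcu_read_unlock();

	return task_uid == who;
}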
diff --git a/fs/locks.c b/fs/locks.c
index 65765cb6afed..0e62dd35d088 100644
--- a/fs/locks.c
+++ b/fs/locks.c
| @@ -1504,9 +1504,8 @@ static int do_fcntl_delete_lease(struct file *filp) | |||
| 1504 | 1504 | ||
| 1505 | static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) | 1505 | static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) |
| 1506 | { | 1506 | { |
| 1507 | struct file_lock *fl; | 1507 | struct file_lock *fl, *ret; |
| 1508 | struct fasync_struct *new; | 1508 | struct fasync_struct *new; |
| 1509 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
| 1510 | int error; | 1509 | int error; |
| 1511 | 1510 | ||
| 1512 | fl = lease_alloc(filp, arg); | 1511 | fl = lease_alloc(filp, arg); |
| @@ -1518,13 +1517,16 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) | |||
| 1518 | locks_free_lock(fl); | 1517 | locks_free_lock(fl); |
| 1519 | return -ENOMEM; | 1518 | return -ENOMEM; |
| 1520 | } | 1519 | } |
| 1520 | ret = fl; | ||
| 1521 | lock_flocks(); | 1521 | lock_flocks(); |
| 1522 | error = __vfs_setlease(filp, arg, &fl); | 1522 | error = __vfs_setlease(filp, arg, &ret); |
| 1523 | if (error) { | 1523 | if (error) { |
| 1524 | unlock_flocks(); | 1524 | unlock_flocks(); |
| 1525 | locks_free_lock(fl); | 1525 | locks_free_lock(fl); |
| 1526 | goto out_free_fasync; | 1526 | goto out_free_fasync; |
| 1527 | } | 1527 | } |
| 1528 | if (ret != fl) | ||
| 1529 | locks_free_lock(fl); | ||
| 1528 | 1530 | ||
| 1529 | /* | 1531 | /* |
| 1530 | * fasync_insert_entry() returns the old entry if any. | 1532 | * fasync_insert_entry() returns the old entry if any. |
| @@ -1532,17 +1534,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) | |||
| 1532 | * inserted it into the fasync list. Clear new so that | 1534 | * inserted it into the fasync list. Clear new so that |
| 1533 | * we don't release it here. | 1535 | * we don't release it here. |
| 1534 | */ | 1536 | */ |
| 1535 | if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new)) | 1537 | if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new)) |
| 1536 | new = NULL; | 1538 | new = NULL; |
| 1537 | 1539 | ||
| 1538 | if (error < 0) { | 1540 | error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); |
| 1539 | /* remove lease just inserted by setlease */ | ||
| 1540 | fl->fl_type = F_UNLCK | F_INPROGRESS; | ||
| 1541 | fl->fl_break_time = jiffies - 10; | ||
| 1542 | time_out_leases(inode); | ||
| 1543 | } else { | ||
| 1544 | error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); | ||
| 1545 | } | ||
| 1546 | unlock_flocks(); | 1541 | unlock_flocks(); |
| 1547 | 1542 | ||
| 1548 | out_free_fasync: | 1543 | out_free_fasync: |
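The reworked lease path accounts for __vfs_setlease() possibly handing back a different file_lock than the one passed in: when an existing lease on the file is reused, the pointer is updated to that lease, the caller's fresh allocation must be freed, and everything afterwards (including the fasync hook-up) has to operate on the returned lock. A condensed sketch of that calling convention, with the fasync and __f_setown() handling from the real function omitted:

static int add_lease_sketch(struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	int error;

	fl = lease_alloc(filp, arg);		/* caller's candidate lock */
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);/* may point ret at an existing lease */
	unlock_flocks();
	if (error) {
		locks_free_lock(fl);
		return error;
	}
	if (ret != fl)
		locks_free_lock(fl);		/* our copy was not consumed */

	/* from here on, only 'ret' may be used */
	return 0;
}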
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index cd51a36b37f0..57afd4a6fabb 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
| @@ -486,7 +486,7 @@ static inline int logfs_get_sb_bdev(struct logfs_super *s, | |||
| 486 | 486 | ||
| 487 | /* dev_mtd.c */ | 487 | /* dev_mtd.c */ |
| 488 | #ifdef CONFIG_MTD | 488 | #ifdef CONFIG_MTD |
| 489 | int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) | 489 | int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr); |
| 490 | #else | 490 | #else |
| 491 | static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) | 491 | static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) |
| 492 | { | 492 | { |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f1e5ec6b5105..ad2bfa68d534 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
| @@ -673,16 +673,17 @@ static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) | |||
| 673 | spin_unlock(&clp->cl_lock); | 673 | spin_unlock(&clp->cl_lock); |
| 674 | } | 674 | } |
| 675 | 675 | ||
| 676 | static void nfsd4_register_conn(struct nfsd4_conn *conn) | 676 | static int nfsd4_register_conn(struct nfsd4_conn *conn) |
| 677 | { | 677 | { |
| 678 | conn->cn_xpt_user.callback = nfsd4_conn_lost; | 678 | conn->cn_xpt_user.callback = nfsd4_conn_lost; |
| 679 | register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); | 679 | return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); |
| 680 | } | 680 | } |
| 681 | 681 | ||
| 682 | static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) | 682 | static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) |
| 683 | { | 683 | { |
| 684 | struct nfsd4_conn *conn; | 684 | struct nfsd4_conn *conn; |
| 685 | u32 flags = NFS4_CDFC4_FORE; | 685 | u32 flags = NFS4_CDFC4_FORE; |
| 686 | int ret; | ||
| 686 | 687 | ||
| 687 | if (ses->se_flags & SESSION4_BACK_CHAN) | 688 | if (ses->se_flags & SESSION4_BACK_CHAN) |
| 688 | flags |= NFS4_CDFC4_BACK; | 689 | flags |= NFS4_CDFC4_BACK; |
| @@ -690,7 +691,10 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) | |||
| 690 | if (!conn) | 691 | if (!conn) |
| 691 | return nfserr_jukebox; | 692 | return nfserr_jukebox; |
| 692 | nfsd4_hash_conn(conn, ses); | 693 | nfsd4_hash_conn(conn, ses); |
| 693 | nfsd4_register_conn(conn); | 694 | ret = nfsd4_register_conn(conn); |
| 695 | if (ret) | ||
| 696 | /* oops; xprt is already down: */ | ||
| 697 | nfsd4_conn_lost(&conn->cn_xpt_user); | ||
| 694 | return nfs_ok; | 698 | return nfs_ok; |
| 695 | } | 699 | } |
| 696 | 700 | ||
| @@ -1644,6 +1648,7 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi | |||
| 1644 | { | 1648 | { |
| 1645 | struct nfs4_client *clp = ses->se_client; | 1649 | struct nfs4_client *clp = ses->se_client; |
| 1646 | struct nfsd4_conn *c; | 1650 | struct nfsd4_conn *c; |
| 1651 | int ret; | ||
| 1647 | 1652 | ||
| 1648 | spin_lock(&clp->cl_lock); | 1653 | spin_lock(&clp->cl_lock); |
| 1649 | c = __nfsd4_find_conn(new->cn_xprt, ses); | 1654 | c = __nfsd4_find_conn(new->cn_xprt, ses); |
| @@ -1654,7 +1659,10 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi | |||
| 1654 | } | 1659 | } |
| 1655 | __nfsd4_hash_conn(new, ses); | 1660 | __nfsd4_hash_conn(new, ses); |
| 1656 | spin_unlock(&clp->cl_lock); | 1661 | spin_unlock(&clp->cl_lock); |
| 1657 | nfsd4_register_conn(new); | 1662 | ret = nfsd4_register_conn(new); |
| 1663 | if (ret) | ||
| 1664 | /* oops; xprt is already down: */ | ||
| 1665 | nfsd4_conn_lost(&new->cn_xpt_user); | ||
| 1658 | return; | 1666 | return; |
| 1659 | } | 1667 | } |
| 1660 | 1668 | ||
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index d8408217e3bd..1efea3615589 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
| @@ -159,7 +159,9 @@ struct ocfs2_lock_res { | |||
| 159 | char l_name[OCFS2_LOCK_ID_MAX_LEN]; | 159 | char l_name[OCFS2_LOCK_ID_MAX_LEN]; |
| 160 | unsigned int l_ro_holders; | 160 | unsigned int l_ro_holders; |
| 161 | unsigned int l_ex_holders; | 161 | unsigned int l_ex_holders; |
| 162 | unsigned char l_level; | 162 | char l_level; |
| 163 | char l_requested; | ||
| 164 | char l_blocking; | ||
| 163 | 165 | ||
| 164 | /* Data packed - type enum ocfs2_lock_type */ | 166 | /* Data packed - type enum ocfs2_lock_type */ |
| 165 | unsigned char l_type; | 167 | unsigned char l_type; |
| @@ -169,8 +171,6 @@ struct ocfs2_lock_res { | |||
| 169 | unsigned char l_action; | 171 | unsigned char l_action; |
| 170 | /* Data packed - enum type ocfs2_unlock_action */ | 172 | /* Data packed - enum type ocfs2_unlock_action */ |
| 171 | unsigned char l_unlock_action; | 173 | unsigned char l_unlock_action; |
| 172 | unsigned char l_requested; | ||
| 173 | unsigned char l_blocking; | ||
| 174 | unsigned int l_pending_gen; | 174 | unsigned int l_pending_gen; |
| 175 | 175 | ||
| 176 | spinlock_t l_lock; | 176 | spinlock_t l_lock; |
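The ocfs2 hunk narrows l_level, l_requested and l_blocking from unsigned char to char and regroups them. One plausible reason, stated here as an assumption rather than something the diff spells out, is that DLM lock levels include the negative sentinel DLM_LOCK_IV (-1), which never compares equal to an unsigned char after integer promotion. A standalone illustration of that pitfall:

#include <stdio.h>

#define DLM_LOCK_IV (-1)	/* "invalid" level, value as in <linux/dlmconstants.h> */

int main(void)
{
	unsigned char u_level = DLM_LOCK_IV;	/* stored as 255 */
	char          s_level = DLM_LOCK_IV;	/* plain char is signed on most, not all, ABIs */

	/* u_level is promoted to 255, so the comparison is false. */
	printf("unsigned char matches: %d\n", u_level == DLM_LOCK_IV);
	printf("plain char matches:    %d\n", s_level == DLM_LOCK_IV);
	return 0;
}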
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index ddb1f41376e5..911e61f348fc 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
| @@ -418,7 +418,7 @@ out_no_root: | |||
| 418 | static struct dentry *openprom_mount(struct file_system_type *fs_type, | 418 | static struct dentry *openprom_mount(struct file_system_type *fs_type, |
| 419 | int flags, const char *dev_name, void *data) | 419 | int flags, const char *dev_name, void *data) |
| 420 | { | 420 | { |
| 421 | return mount_single(fs_type, flags, data, openprom_fill_super) | 421 | return mount_single(fs_type, flags, data, openprom_fill_super); |
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | static struct file_system_type openprom_fs_type = { | 424 | static struct file_system_type openprom_fs_type = { |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c9af48fffcd7..7d287afccde5 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
| @@ -1111,11 +1111,12 @@ xfs_vm_writepage( | |||
| 1111 | uptodate = 0; | 1111 | uptodate = 0; |
| 1112 | 1112 | ||
| 1113 | /* | 1113 | /* |
| 1114 | * A hole may still be marked uptodate because discard_buffer | 1114 | * set_page_dirty dirties all buffers in a page, independent |
| 1115 | * leaves the flag set. | 1115 | * of their state. The dirty state however is entirely |
| 1116 | * meaningless for holes (!mapped && uptodate), so skip | ||
| 1117 | * buffers covering holes here. | ||
| 1116 | */ | 1118 | */ |
| 1117 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { | 1119 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { |
| 1118 | ASSERT(!buffer_dirty(bh)); | ||
| 1119 | imap_valid = 0; | 1120 | imap_valid = 0; |
| 1120 | continue; | 1121 | continue; |
| 1121 | } | 1122 | } |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 63fd2c07cb57..aa1d353def29 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
| @@ -1781,7 +1781,6 @@ xfs_buf_delwri_split( | |||
| 1781 | INIT_LIST_HEAD(list); | 1781 | INIT_LIST_HEAD(list); |
| 1782 | spin_lock(dwlk); | 1782 | spin_lock(dwlk); |
| 1783 | list_for_each_entry_safe(bp, n, dwq, b_list) { | 1783 | list_for_each_entry_safe(bp, n, dwq, b_list) { |
| 1784 | trace_xfs_buf_delwri_split(bp, _RET_IP_); | ||
| 1785 | ASSERT(bp->b_flags & XBF_DELWRI); | 1784 | ASSERT(bp->b_flags & XBF_DELWRI); |
| 1786 | 1785 | ||
| 1787 | if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { | 1786 | if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { |
| @@ -1795,6 +1794,7 @@ xfs_buf_delwri_split( | |||
| 1795 | _XBF_RUN_QUEUES); | 1794 | _XBF_RUN_QUEUES); |
| 1796 | bp->b_flags |= XBF_WRITE; | 1795 | bp->b_flags |= XBF_WRITE; |
| 1797 | list_move_tail(&bp->b_list, list); | 1796 | list_move_tail(&bp->b_list, list); |
| 1797 | trace_xfs_buf_delwri_split(bp, _RET_IP_); | ||
| 1798 | } else | 1798 | } else |
| 1799 | skipped++; | 1799 | skipped++; |
| 1800 | } | 1800 | } |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2ea238f6d38e..ad442d9e392e 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
| @@ -416,7 +416,7 @@ xfs_attrlist_by_handle( | |||
| 416 | if (IS_ERR(dentry)) | 416 | if (IS_ERR(dentry)) |
| 417 | return PTR_ERR(dentry); | 417 | return PTR_ERR(dentry); |
| 418 | 418 | ||
| 419 | kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); | 419 | kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL); |
| 420 | if (!kbuf) | 420 | if (!kbuf) |
| 421 | goto out_dput; | 421 | goto out_dput; |
| 422 | 422 | ||
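Switching the attrlist buffer from kmalloc() to kzalloc() matters because the buffer is later copied back to user space; zeroing it up front keeps any slack the attr-list code never writes from exposing stale kernel heap contents. A sketch of the general pattern (the copy-out step is assumed from the surrounding ioctl code, not shown in this hunk, and the names are illustrative):

#include <linux/slab.h>
#include <linux/uaccess.h>

static int copy_list_to_user(void __user *ubuf, size_t buflen)
{
	char *kbuf;
	int error = 0;

	kbuf = kzalloc(buflen, GFP_KERNEL);	/* zeroed, unlike kmalloc() */
	if (!kbuf)
		return -ENOMEM;

	/* ... fill in only as many entries as fit ... */

	if (copy_to_user(ubuf, kbuf, buflen))	/* the whole buffer goes out */
		error = -EFAULT;

	kfree(kbuf);
	return error;
}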
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 96107efc0c61..94d5fd6a2973 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
| @@ -762,7 +762,8 @@ xfs_setup_inode( | |||
| 762 | inode->i_state = I_NEW; | 762 | inode->i_state = I_NEW; |
| 763 | 763 | ||
| 764 | inode_sb_list_add(inode); | 764 | inode_sb_list_add(inode); |
| 765 | insert_inode_hash(inode); | 765 | /* make the inode look hashed for the writeback code */ |
| 766 | hlist_add_fake(&inode->i_hash); | ||
| 766 | 767 | ||
| 767 | inode->i_mode = ip->i_d.di_mode; | 768 | inode->i_mode = ip->i_d.di_mode; |
| 768 | inode->i_nlink = ip->i_d.di_nlink; | 769 | inode->i_nlink = ip->i_d.di_nlink; |
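hlist_add_fake() makes an unhashed inode look hashed to the writeback code without putting it on any real hash chain, which suits XFS since it maintains its own inode cache and does not want the VFS inode hash. A sketch of what the helper amounts to, matching the usual <linux/list.h> definition as remembered:

static inline void hlist_add_fake(struct hlist_node *n)
{
	/* Point pprev back at ourselves so hlist_unhashed(n) returns false,
	 * even though the node sits on no list. */
	n->pprev = &n->next;
}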
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9f3a78fe6ae4..064f964d4f3c 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
| @@ -353,9 +353,6 @@ xfs_parseargs( | |||
| 353 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; | 353 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; |
| 354 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { | 354 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { |
| 355 | mp->m_flags |= XFS_MOUNT_DELAYLOG; | 355 | mp->m_flags |= XFS_MOUNT_DELAYLOG; |
| 356 | cmn_err(CE_WARN, | ||
| 357 | "Enabling EXPERIMENTAL delayed logging feature " | ||
| 358 | "- use at your own risk.\n"); | ||
| 359 | } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { | 356 | } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { |
| 360 | mp->m_flags &= ~XFS_MOUNT_DELAYLOG; | 357 | mp->m_flags &= ~XFS_MOUNT_DELAYLOG; |
| 361 | } else if (!strcmp(this_char, "ihashsize")) { | 358 | } else if (!strcmp(this_char, "ihashsize")) { |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 37d33254981d..afb0d7cfad1c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
| @@ -853,6 +853,7 @@ restart: | |||
| 853 | if (trylock) { | 853 | if (trylock) { |
| 854 | if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { | 854 | if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { |
| 855 | skipped++; | 855 | skipped++; |
| 856 | xfs_perag_put(pag); | ||
| 856 | continue; | 857 | continue; |
| 857 | } | 858 | } |
| 858 | first_index = pag->pag_ici_reclaim_cursor; | 859 | first_index = pag->pag_ici_reclaim_cursor; |
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 9b715dce5699..9124425b7f2f 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
| @@ -744,9 +744,15 @@ xfs_filestream_new_ag( | |||
| 744 | * If the file's parent directory is known, take its iolock in exclusive | 744 | * If the file's parent directory is known, take its iolock in exclusive |
| 745 | * mode to prevent two sibling files from racing each other to migrate | 745 | * mode to prevent two sibling files from racing each other to migrate |
| 746 | * themselves and their parent to different AGs. | 746 | * themselves and their parent to different AGs. |
| 747 | * | ||
| 748 | * Note that we lock the parent directory iolock inside the child | ||
| 749 | * iolock here. That's fine as we never hold both parent and child | ||
| 750 | * iolock in any other place. This is different from the ilock, | ||
| 751 | * which requires locking of the child after the parent for namespace | ||
| 752 | * operations. | ||
| 747 | */ | 753 | */ |
| 748 | if (pip) | 754 | if (pip) |
| 749 | xfs_ilock(pip, XFS_IOLOCK_EXCL); | 755 | xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT); |
| 750 | 756 | ||
| 751 | /* | 757 | /* |
| 752 | * A new AG needs to be found for the file. If the file's parent | 758 | * A new AG needs to be found for the file. If the file's parent |
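XFS_IOLOCK_PARENT appears to be a lockdep annotation: taking the parent directory's iolock in a distinct subclass tells the lock validator that the child-then-parent order documented in the new comment is intentional, rather than a deadlock in the making. The same idea with plain mutexes, as a generic illustration only and not XFS's actual locking code:

#include <linux/mutex.h>

static DEFINE_MUTEX(child_iolock);
static DEFINE_MUTEX(parent_iolock);

/* Acquire the parent in a separate lockdep subclass so the validator
 * accepts this fixed child-then-parent nesting. */
static void lock_child_then_parent(void)
{
	mutex_lock(&child_iolock);
	mutex_lock_nested(&parent_iolock, SINGLE_DEPTH_NESTING);

	/* ... work that needs both locks ... */

	mutex_unlock(&parent_iolock);
	mutex_unlock(&child_iolock);
}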
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b1498ab5a399..19e9dfa1c254 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
| @@ -275,6 +275,7 @@ xfs_free_perag( | |||
| 275 | pag = radix_tree_delete(&mp->m_perag_tree, agno); | 275 | pag = radix_tree_delete(&mp->m_perag_tree, agno); |
| 276 | spin_unlock(&mp->m_perag_lock); | 276 | spin_unlock(&mp->m_perag_lock); |
| 277 | ASSERT(pag); | 277 | ASSERT(pag); |
| 278 | ASSERT(atomic_read(&pag->pag_ref) == 0); | ||
| 278 | call_rcu(&pag->rcu_head, __xfs_free_perag); | 279 | call_rcu(&pag->rcu_head, __xfs_free_perag); |
| 279 | } | 280 | } |
| 280 | } | 281 | } |
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e0e64b113bd6..9bb6eda4cd21 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
| @@ -346,8 +346,17 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid, | |||
| 346 | #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) | 346 | #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) |
| 347 | #define xfs_trans_apply_dquot_deltas(tp) | 347 | #define xfs_trans_apply_dquot_deltas(tp) |
| 348 | #define xfs_trans_unreserve_and_mod_dquots(tp) | 348 | #define xfs_trans_unreserve_and_mod_dquots(tp) |
| 349 | #define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0) | 349 | static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp, |
| 350 | #define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0) | 350 | struct xfs_inode *ip, long nblks, long ninos, uint flags) |
| 351 | { | ||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp, | ||
| 355 | struct xfs_mount *mp, struct xfs_dquot *udqp, | ||
| 356 | struct xfs_dquot *gdqp, long nblks, long nions, uint flags) | ||
| 357 | { | ||
| 358 | return 0; | ||
| 359 | } | ||
| 351 | #define xfs_qm_vop_create_dqattach(tp, ip, u, g) | 360 | #define xfs_qm_vop_create_dqattach(tp, ip, u, g) |
| 352 | #define xfs_qm_vop_rename_dqattach(it) (0) | 361 | #define xfs_qm_vop_rename_dqattach(it) (0) |
| 353 | #define xfs_qm_vop_chown(tp, ip, old, new) (NULL) | 362 | #define xfs_qm_vop_chown(tp, ip, old, new) (NULL) |
| @@ -357,11 +366,14 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid, | |||
| 357 | #define xfs_qm_dqdetach(ip) | 366 | #define xfs_qm_dqdetach(ip) |
| 358 | #define xfs_qm_dqrele(d) | 367 | #define xfs_qm_dqrele(d) |
| 359 | #define xfs_qm_statvfs(ip, s) | 368 | #define xfs_qm_statvfs(ip, s) |
| 360 | #define xfs_qm_sync(mp, fl) (0) | 369 | static inline int xfs_qm_sync(struct xfs_mount *mp, int flags) |
| 370 | { | ||
| 371 | return 0; | ||
| 372 | } | ||
| 361 | #define xfs_qm_newmount(mp, a, b) (0) | 373 | #define xfs_qm_newmount(mp, a, b) (0) |
| 362 | #define xfs_qm_mount_quotas(mp) | 374 | #define xfs_qm_mount_quotas(mp) |
| 363 | #define xfs_qm_unmount(mp) | 375 | #define xfs_qm_unmount(mp) |
| 364 | #define xfs_qm_unmount_quotas(mp) (0) | 376 | #define xfs_qm_unmount_quotas(mp) |
| 365 | #endif /* CONFIG_XFS_QUOTA */ | 377 | #endif /* CONFIG_XFS_QUOTA */ |
| 366 | 378 | ||
| 367 | #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ | 379 | #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ |
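The xfs_quota.h hunks convert several !CONFIG_XFS_QUOTA stubs from function-like macros expanding to (0) into static inline functions. The inline form keeps the arguments parsed, type-checked and "used" (so quota-less builds do not sprout unused-variable warnings), while the compiler still folds the constant return away; the final hunk also drops the stray (0) from the xfs_qm_unmount_quotas() stub so it reads as a statement-like void call. A small standalone illustration of the difference, with purely illustrative names:

#include <stdio.h>

struct ctx { int dummy; };	/* stands in for the real argument types */

/* Old-style stub: the arguments vanish in expansion, so they are never
 * type-checked and variables passed only here look unused. */
#define feature_sync_macro(c, flags)	(0)

/* New-style stub: arguments are parsed, type-checked and count as used. */
static inline int feature_sync_inline(struct ctx *c, int flags)
{
	(void)c;
	(void)flags;
	return 0;
}

int main(void)
{
	struct ctx c = { 0 };
	int flags = 1;	/* if passed only to the macro form, -Wunused-but-set-variable would fire */

	printf("%d %d\n", feature_sync_macro(&c, flags), feature_sync_inline(&c, flags));
	return 0;
}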
