author		David S. Miller <davem@davemloft.net>	2012-03-18 23:29:41 -0400
committer	David S. Miller <davem@davemloft.net>	2012-03-18 23:29:41 -0400
commit		4da0bd736552e6377b407b3c3d3ae518ebbdd269
tree		f0da9f843b8033565c3ca4103fccb17a60688326 /fs
parent		81a430ac1b88b0702c57d2513e247317e810e04d
parent		c16fa4f2ad19908a47c63d8fa436a1178438c7e7
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'fs')
-rw-r--r--	fs/afs/internal.h	2
-rw-r--r--	fs/afs/rxrpc.c	3
-rw-r--r--	fs/aio.c	22
-rw-r--r--	fs/block_dev.c	16
-rw-r--r--	fs/btrfs/backref.c	8
-rw-r--r--	fs/btrfs/reada.c	2
-rw-r--r--	fs/cifs/file.c	69
-rw-r--r--	fs/cifs/xattr.c	6
-rw-r--r--	fs/eventpoll.c	4
-rw-r--r--	fs/inode.c	4
-rw-r--r--	fs/namei.c	4
-rw-r--r--	fs/nilfs2/the_nilfs.c	7
-rw-r--r--	fs/udf/file.c	2
13 files changed, 107 insertions, 42 deletions
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index d2b0888126d4..a306bb6d88d9 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -109,7 +109,7 @@ struct afs_call {
 	unsigned		reply_size;	/* current size of reply */
 	unsigned		first_offset;	/* offset into mapping[first] */
 	unsigned		last_to;	/* amount of mapping[last] */
-	unsigned short		offset;		/* offset into received data store */
+	unsigned		offset;		/* offset into received data store */
 	unsigned char		unmarshall;	/* unmarshalling phase */
 	bool			incoming;	/* T if incoming call */
 	bool			send_pages;	/* T if data from mapping should be sent */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e45a323aebb4..8ad8c2a0703a 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -314,6 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	struct msghdr msg;
 	struct kvec iov[1];
 	int ret;
+	struct sk_buff *skb;
 
 	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
 
@@ -380,6 +381,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 
 error_do_abort:
 	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
+	while ((skb = skb_dequeue(&call->rx_queue)))
+		afs_free_skb(skb);
 	rxrpc_kernel_end_call(rxcall);
 	call->rxcall = NULL;
 error_kill_call:
diff --git a/fs/aio.c b/fs/aio.c
index 67e4b9047cc9..b9d64d89a043 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline void get_ioctx(struct kioctx *kioctx)
-{
-	BUG_ON(atomic_read(&kioctx->users) <= 0);
-	atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
 	return atomic_inc_not_zero(&kioctx->users);
@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	mm = ctx->mm = current->mm;
 	atomic_inc(&mm->mm_count);
 
-	atomic_set(&ctx->users, 1);
+	atomic_set(&ctx->users, 2);
 	spin_lock_init(&ctx->ctx_lock);
 	spin_lock_init(&ctx->ring_info.ring_lock);
 	init_waitqueue_head(&ctx->wait);
@@ -609,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
 		fput(req->ki_filp);
 
 		/* Link the iocb into the context's free list */
+		rcu_read_lock();
 		spin_lock_irq(&ctx->ctx_lock);
 		really_put_req(ctx, req);
+		/*
+		 * at that point ctx might've been killed, but actual
+		 * freeing is RCU'd
+		 */
 		spin_unlock_irq(&ctx->ctx_lock);
+		rcu_read_unlock();
 
-		put_ioctx(ctx);
 		spin_lock_irq(&fput_lock);
 	}
 	spin_unlock_irq(&fput_lock);
@@ -644,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	 * this function will be executed w/out any aio kthread wakeup.
 	 */
 	if (unlikely(!fput_atomic(req->ki_filp))) {
-		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
 		spin_unlock(&fput_lock);
@@ -1338,10 +1336,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 	ret = PTR_ERR(ioctx);
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
-		if (!ret)
+		if (!ret) {
+			put_ioctx(ioctx);
 			return 0;
-
-		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+		}
 		io_destroy(ioctx);
 	}
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0e575d1304b4..5e9f198f7712 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1183,8 +1183,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			 * The latter is necessary to prevent ghost
 			 * partitions on a removed medium.
 			 */
-			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-				rescan_partitions(disk, bdev);
+			if (bdev->bd_invalidated) {
+				if (!ret)
+					rescan_partitions(disk, bdev);
+				else if (ret == -ENOMEDIUM)
+					invalidate_partitions(disk, bdev);
+			}
 			if (ret)
 				goto out_clear;
 		} else {
@@ -1214,8 +1218,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			if (bdev->bd_disk->fops->open)
 				ret = bdev->bd_disk->fops->open(bdev, mode);
 			/* the same as first opener case, read comment there */
-			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-				rescan_partitions(bdev->bd_disk, bdev);
+			if (bdev->bd_invalidated) {
+				if (!ret)
+					rescan_partitions(bdev->bd_disk, bdev);
+				else if (ret == -ENOMEDIUM)
+					invalidate_partitions(bdev->bd_disk, bdev);
+			}
 			if (ret)
 				goto out_unlock_bdev;
 		}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 98f6bf10bbd4..0436c12da8c2 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -583,7 +583,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	struct btrfs_path *path;
 	struct btrfs_key info_key = { 0 };
 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
-	struct btrfs_delayed_ref_head *head = NULL;
+	struct btrfs_delayed_ref_head *head;
 	int info_level = 0;
 	int ret;
 	struct list_head prefs_delayed;
@@ -607,6 +607,8 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	 * at a specified point in time
 	 */
 again:
+	head = NULL;
+
 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
@@ -635,8 +637,10 @@ again:
 			goto again;
 		}
 		ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
-		if (ret)
+		if (ret) {
+			spin_unlock(&delayed_refs->lock);
 			goto out;
+		}
 	}
 	spin_unlock(&delayed_refs->lock);
 
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 2373b39a132b..22db04550f6a 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -305,7 +305,7 @@ again:
 
 	spin_lock(&fs_info->reada_lock);
 	ret = radix_tree_insert(&dev->reada_zones,
-				(unsigned long)zone->end >> PAGE_CACHE_SHIFT,
+				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
 				zone);
 	spin_unlock(&fs_info->reada_lock);
 
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 4dd9283885e7..5e64748a2917 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 	for (lockp = &inode->i_flock; *lockp != NULL; \
 	     lockp = &(*lockp)->fl_next)
 
+struct lock_to_push {
+	struct list_head llist;
+	__u64 offset;
+	__u64 length;
+	__u32 pid;
+	__u16 netfid;
+	__u8 type;
+};
+
 static int
 cifs_push_posix_locks(struct cifsFileInfo *cfile)
 {
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	struct file_lock *flock, **before;
-	struct cifsLockInfo *lck, *tmp;
+	unsigned int count = 0, i = 0;
 	int rc = 0, xid, type;
+	struct list_head locks_to_send, *el;
+	struct lock_to_push *lck, *tmp;
 	__u64 length;
-	struct list_head locks_to_send;
 
 	xid = GetXid();
 
@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 		return rc;
 	}
 
+	lock_flocks();
+	cifs_for_each_lock(cfile->dentry->d_inode, before) {
+		if ((*before)->fl_flags & FL_POSIX)
+			count++;
+	}
+	unlock_flocks();
+
 	INIT_LIST_HEAD(&locks_to_send);
 
+	/*
+	 * Allocating count locks is enough because no locks can be added to
+	 * the list while we are holding cinode->lock_mutex that protects
+	 * locking operations of this inode.
+	 */
+	for (; i < count; i++) {
+		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+		if (!lck) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		list_add_tail(&lck->llist, &locks_to_send);
+	}
+
+	i = 0;
+	el = locks_to_send.next;
 	lock_flocks();
 	cifs_for_each_lock(cfile->dentry->d_inode, before) {
+		if (el == &locks_to_send) {
+			/* something is really wrong */
+			cERROR(1, "Can't push all brlocks!");
+			break;
+		}
 		flock = *before;
+		if ((flock->fl_flags & FL_POSIX) == 0)
+			continue;
 		length = 1 + flock->fl_end - flock->fl_start;
 		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
 			type = CIFS_RDLCK;
 		else
 			type = CIFS_WRLCK;
-
-		lck = cifs_lock_init(flock->fl_start, length, type,
-				     cfile->netfid);
-		if (!lck) {
-			rc = -ENOMEM;
-			goto send_locks;
-		}
+		lck = list_entry(el, struct lock_to_push, llist);
 		lck->pid = flock->fl_pid;
-
-		list_add_tail(&lck->llist, &locks_to_send);
+		lck->netfid = cfile->netfid;
+		lck->length = length;
+		lck->type = type;
+		lck->offset = flock->fl_start;
+		i++;
+		el = el->next;
 	}
-
-send_locks:
 	unlock_flocks();
 
 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
@@ -979,11 +1015,18 @@ send_locks:
 		kfree(lck);
 	}
 
+out:
 	cinode->can_cache_brlcks = false;
 	mutex_unlock(&cinode->lock_mutex);
 
 	FreeXid(xid);
 	return rc;
+err_out:
+	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+		list_del(&lck->llist);
+		kfree(lck);
+	}
+	goto out;
 }
 
 static int
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 45f07c46f3ed..10d92cf57ab6 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -105,7 +105,6 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
 	struct cifs_tcon *pTcon;
 	struct super_block *sb;
 	char *full_path;
-	struct cifs_ntsd *pacl;
 
 	if (direntry == NULL)
 		return -EIO;
@@ -164,23 +163,24 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
 			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
 	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
 			strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
+#ifdef CONFIG_CIFS_ACL
+		struct cifs_ntsd *pacl;
 		pacl = kmalloc(value_size, GFP_KERNEL);
 		if (!pacl) {
 			cFYI(1, "%s: Can't allocate memory for ACL",
 					__func__);
 			rc = -ENOMEM;
 		} else {
-#ifdef CONFIG_CIFS_ACL
 			memcpy(pacl, ea_value, value_size);
 			rc = set_cifs_acl(pacl, value_size,
 				direntry->d_inode, full_path, CIFS_ACL_DACL);
 			if (rc == 0) /* force revalidate of the inode */
 				CIFS_I(direntry->d_inode)->time = 0;
 			kfree(pacl);
+		}
 #else
 			cFYI(1, "Set CIFS ACL not supported yet");
 #endif /* CONFIG_CIFS_ACL */
-		}
 	} else {
 		int temp;
 		temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ea54cdef04dd..4d9d3a45e356 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -988,6 +988,10 @@ static int path_count[PATH_ARR_SIZE];
 
 static int path_count_inc(int nests)
 {
+	/* Allow an arbitrary number of depth 1 paths */
+	if (nests == 0)
+		return 0;
+
 	if (++path_count[nests] > path_limits[nests])
 		return -1;
 	return 0;
diff --git a/fs/inode.c b/fs/inode.c
index d3ebdbe723d0..83ab215baab1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -938,8 +938,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
 	struct file_system_type *type = inode->i_sb->s_type;
 
 	/* Set new key only if filesystem hasn't already changed it */
-	if (!lockdep_match_class(&inode->i_mutex,
-	    &type->i_mutex_key)) {
+	if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
 		/*
 		 * ensure nobody is actually holding i_mutex
 		 */
@@ -966,6 +965,7 @@ void unlock_new_inode(struct inode *inode)
 	spin_lock(&inode->i_lock);
 	WARN_ON(!(inode->i_state & I_NEW));
 	inode->i_state &= ~I_NEW;
+	smp_mb();
 	wake_up_bit(&inode->i_state, __I_NEW);
 	spin_unlock(&inode->i_lock);
 }
diff --git a/fs/namei.c b/fs/namei.c
index e2ba62820a0f..46ea9cc16647 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2162,7 +2162,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
 		/* sayonara */
 		error = complete_walk(nd);
 		if (error)
-			return ERR_PTR(-ECHILD);
+			return ERR_PTR(error);
 
 		error = -ENOTDIR;
 		if (nd->flags & LOOKUP_DIRECTORY) {
@@ -2261,7 +2261,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
 	/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
 	error = complete_walk(nd);
 	if (error)
-		goto exit;
+		return ERR_PTR(error);
 	error = -EISDIR;
 	if (S_ISDIR(nd->inode->i_mode))
 		goto exit;
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index d32714094375..501b7f8b739f 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -409,6 +409,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 	nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
 	nilfs->ns_r_segments_percentage =
 		le32_to_cpu(sbp->s_r_segments_percentage);
+	if (nilfs->ns_r_segments_percentage < 1 ||
+	    nilfs->ns_r_segments_percentage > 99) {
+		printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
+		return -EINVAL;
+	}
+
 	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
 	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
 	return 0;
@@ -515,6 +521,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
 		brelse(sbh[1]);
 		sbh[1] = NULL;
 		sbp[1] = NULL;
+		valid[1] = 0;
 		swp = 0;
 	}
 	if (!valid[swp]) {
diff --git a/fs/udf/file.c b/fs/udf/file.c
index dca0c3881e82..d567b8448dfc 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -201,12 +201,10 @@ out:
 static int udf_release_file(struct inode *inode, struct file *filp)
 {
 	if (filp->f_mode & FMODE_WRITE) {
-		mutex_lock(&inode->i_mutex);
 		down_write(&UDF_I(inode)->i_data_sem);
 		udf_discard_prealloc(inode);
 		udf_truncate_tail_extent(inode);
 		up_write(&UDF_I(inode)->i_data_sem);
-		mutex_unlock(&inode->i_mutex);
 	}
 	return 0;
 }