author    Al Viro <viro@zeniv.linux.org.uk>    2014-06-12 00:27:11 -0400
committer Al Viro <viro@zeniv.linux.org.uk>    2014-06-12 00:28:09 -0400
commit    9c1d5284c79fea050f115eadeec1dd1758e5c630 (patch)
tree      4d16fd5aad7ff4931e985c0128c5747f23561f8a /fs
parent    5f073850602084fbcbb987948ff3e70ae273f7d2 (diff)
parent    9f12600fe425bc28f0ccba034a77783c09c15af4 (diff)
Merge commit '9f12600fe425bc28f0ccba034a77783c09c15af4' into for-linus
Backmerge of dcache.c changes from mainline. It's that, or complete rebase...

Conflicts:
        fs/splice.c

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/cmservice.c        |  19
-rw-r--r--  fs/afs/internal.h         |   2
-rw-r--r--  fs/afs/rxrpc.c            |  86
-rw-r--r--  fs/btrfs/ioctl.c          |   6
-rw-r--r--  fs/btrfs/send.c           |   2
-rw-r--r--  fs/cifs/inode.c           |   3
-rw-r--r--  fs/dcache.c               | 153
-rw-r--r--  fs/exec.c                 |   6
-rw-r--r--  fs/kernfs/file.c          |  17
-rw-r--r--  fs/locks.c                |  36
-rw-r--r--  fs/nfsd/nfs4acl.c         |  19
-rw-r--r--  fs/nfsd/nfs4state.c       |  40
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c  |   8
-rw-r--r--  fs/splice.c               |   6
-rw-r--r--  fs/sysfs/file.c           |   3
-rw-r--r--  fs/sysfs/mount.c          |   3
-rw-r--r--  fs/xfs/xfs_attr.c         |  24
-rw-r--r--  fs/xfs/xfs_attr_leaf.c    |  21
-rw-r--r--  fs/xfs/xfs_attr_list.c    |   1
-rw-r--r--  fs/xfs/xfs_attr_remote.c  |   8
-rw-r--r--  fs/xfs/xfs_da_btree.h     |   2
-rw-r--r--  fs/xfs/xfs_export.c       |   2
-rw-r--r--  fs/xfs/xfs_file.c         |   8
-rw-r--r--  fs/xfs/xfs_iops.c         |  67
-rw-r--r--  fs/xfs/xfs_log.c          |  10
-rw-r--r--  fs/xfs/xfs_mount.c        |   2
-rw-r--r--  fs/xfs/xfs_qm.c           |  26
-rw-r--r--  fs/xfs/xfs_sb.c           |   4
-rw-r--r--  fs/xfs/xfs_super.c        |   4
29 files changed, 370 insertions(+), 218 deletions(-)
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 1c8c6cc6de30..4b0eff6da674 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
130{ 130{
131 _enter(""); 131 _enter("");
132 132
133 /* Break the callbacks here so that we do it after the final ACK is
134 * received. The step number here must match the final number in
135 * afs_deliver_cb_callback().
136 */
137 if (call->unmarshall == 6) {
138 ASSERT(call->server && call->count && call->request);
139 afs_break_callbacks(call->server, call->count, call->request);
140 }
141
133 afs_put_server(call->server); 142 afs_put_server(call->server);
134 call->server = NULL; 143 call->server = NULL;
135 kfree(call->buffer); 144 kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
272 _debug("trailer"); 281 _debug("trailer");
273 if (skb->len != 0) 282 if (skb->len != 0)
274 return -EBADMSG; 283 return -EBADMSG;
284
285 /* Record that the message was unmarshalled successfully so
 286 * that the call destructor can know to do the callback breaking
287 * work, even if the final ACK isn't received.
288 *
289 * If the step number changes, then afs_cm_destructor() must be
290 * updated also.
291 */
292 call->unmarshall++;
293 case 6:
275 break; 294 break;
276 } 295 }
277 296
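
The two cmservice.c hunks cooperate: afs_deliver_cb_callback() bumps call->unmarshall past the trailer so a fully delivered message can be told apart from an aborted one, and afs_cm_destructor() keys the deferred callback-breaking work off that final step number (6). A minimal userspace sketch of the idea, a progress counter consulted by the cleanup path; all names here are illustrative, not kernel API:

    #include <stdio.h>

    struct req {
        int step;               /* how far unmarshalling got */
    };

    #define FINAL_STEP 6        /* must match the delivery state machine */

    static void destructor(struct req *r)
    {
        /* Run the deferred work only if delivery fully completed. */
        if (r->step == FINAL_STEP)
            printf("running deferred callback-breaking work\n");
    }

    int main(void)
    {
        struct req r = { .step = FINAL_STEP };  /* pretend delivery finished */
        destructor(&r);
        return 0;
    }
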
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index d2f91bd615a9..71d5982312f3 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -75,7 +75,7 @@ struct afs_call {
75 const struct afs_call_type *type; /* type of call */ 75 const struct afs_call_type *type; /* type of call */
76 const struct afs_wait_mode *wait_mode; /* completion wait mode */ 76 const struct afs_wait_mode *wait_mode; /* completion wait mode */
77 wait_queue_head_t waitq; /* processes awaiting completion */ 77 wait_queue_head_t waitq; /* processes awaiting completion */
78 work_func_t async_workfn; 78 void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
79 struct work_struct async_work; /* asynchronous work processor */ 79 struct work_struct async_work; /* asynchronous work processor */
80 struct work_struct work; /* actual work processor */ 80 struct work_struct work; /* actual work processor */
81 struct sk_buff_head rx_queue; /* received packets */ 81 struct sk_buff_head rx_queue; /* received packets */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ef943df73b8c..03a3beb17004 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
25static int afs_wait_for_call_to_complete(struct afs_call *); 25static int afs_wait_for_call_to_complete(struct afs_call *);
26static void afs_wake_up_async_call(struct afs_call *); 26static void afs_wake_up_async_call(struct afs_call *);
27static int afs_dont_wait_for_call_to_complete(struct afs_call *); 27static int afs_dont_wait_for_call_to_complete(struct afs_call *);
28static void afs_process_async_call(struct work_struct *); 28static void afs_process_async_call(struct afs_call *);
29static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *); 29static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
30static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool); 30static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
31 31
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
58static struct sk_buff_head afs_incoming_calls; 58static struct sk_buff_head afs_incoming_calls;
59static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call); 59static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
60 60
61static void afs_async_workfn(struct work_struct *work)
62{
63 struct afs_call *call = container_of(work, struct afs_call, async_work);
64
65 call->async_workfn(call);
66}
67
61/* 68/*
62 * open an RxRPC socket and bind it to be a server for callback notifications 69 * open an RxRPC socket and bind it to be a server for callback notifications
63 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT 70 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -184,6 +191,28 @@ static void afs_free_call(struct afs_call *call)
184} 191}
185 192
186/* 193/*
194 * End a call but do not free it
195 */
196static void afs_end_call_nofree(struct afs_call *call)
197{
198 if (call->rxcall) {
199 rxrpc_kernel_end_call(call->rxcall);
200 call->rxcall = NULL;
201 }
202 if (call->type->destructor)
203 call->type->destructor(call);
204}
205
206/*
207 * End a call and free it
208 */
209static void afs_end_call(struct afs_call *call)
210{
211 afs_end_call_nofree(call);
212 afs_free_call(call);
213}
214
215/*
187 * allocate a call with flat request and reply buffers 216 * allocate a call with flat request and reply buffers
188 */ 217 */
189struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, 218struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
326 atomic_read(&afs_outstanding_calls)); 355 atomic_read(&afs_outstanding_calls));
327 356
328 call->wait_mode = wait_mode; 357 call->wait_mode = wait_mode;
329 INIT_WORK(&call->async_work, afs_process_async_call); 358 call->async_workfn = afs_process_async_call;
359 INIT_WORK(&call->async_work, afs_async_workfn);
330 360
331 memset(&srx, 0, sizeof(srx)); 361 memset(&srx, 0, sizeof(srx));
332 srx.srx_family = AF_RXRPC; 362 srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
383 rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT); 413 rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
384 while ((skb = skb_dequeue(&call->rx_queue))) 414 while ((skb = skb_dequeue(&call->rx_queue)))
385 afs_free_skb(skb); 415 afs_free_skb(skb);
386 rxrpc_kernel_end_call(rxcall);
387 call->rxcall = NULL;
388error_kill_call: 416error_kill_call:
389 call->type->destructor(call); 417 afs_end_call(call);
390 afs_free_call(call);
391 _leave(" = %d", ret); 418 _leave(" = %d", ret);
392 return ret; 419 return ret;
393} 420}
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
509 if (call->state >= AFS_CALL_COMPLETE) { 536 if (call->state >= AFS_CALL_COMPLETE) {
510 while ((skb = skb_dequeue(&call->rx_queue))) 537 while ((skb = skb_dequeue(&call->rx_queue)))
511 afs_free_skb(skb); 538 afs_free_skb(skb);
512 if (call->incoming) { 539 if (call->incoming)
513 rxrpc_kernel_end_call(call->rxcall); 540 afs_end_call(call);
514 call->rxcall = NULL;
515 call->type->destructor(call);
516 afs_free_call(call);
517 }
518 } 541 }
519 542
520 _leave(""); 543 _leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
564 } 587 }
565 588
566 _debug("call complete"); 589 _debug("call complete");
567 rxrpc_kernel_end_call(call->rxcall); 590 afs_end_call(call);
568 call->rxcall = NULL;
569 call->type->destructor(call);
570 afs_free_call(call);
571 _leave(" = %d", ret); 591 _leave(" = %d", ret);
572 return ret; 592 return ret;
573} 593}
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
603/* 623/*
604 * delete an asynchronous call 624 * delete an asynchronous call
605 */ 625 */
606static void afs_delete_async_call(struct work_struct *work) 626static void afs_delete_async_call(struct afs_call *call)
607{ 627{
608 struct afs_call *call =
609 container_of(work, struct afs_call, async_work);
610
611 _enter(""); 628 _enter("");
612 629
613 afs_free_call(call); 630 afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
620 * - on a multiple-thread workqueue this work item may try to run on several 637 * - on a multiple-thread workqueue this work item may try to run on several
621 * CPUs at the same time 638 * CPUs at the same time
622 */ 639 */
623static void afs_process_async_call(struct work_struct *work) 640static void afs_process_async_call(struct afs_call *call)
624{ 641{
625 struct afs_call *call =
626 container_of(work, struct afs_call, async_work);
627
628 _enter(""); 642 _enter("");
629 643
630 if (!skb_queue_empty(&call->rx_queue)) 644 if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
637 call->reply = NULL; 651 call->reply = NULL;
638 652
639 /* kill the call */ 653 /* kill the call */
640 rxrpc_kernel_end_call(call->rxcall); 654 afs_end_call_nofree(call);
641 call->rxcall = NULL;
642 if (call->type->destructor)
643 call->type->destructor(call);
644 655
645 /* we can't just delete the call because the work item may be 656 /* we can't just delete the call because the work item may be
646 * queued */ 657 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
663 call->reply_size += len; 674 call->reply_size += len;
664} 675}
665 676
666static void afs_async_workfn(struct work_struct *work)
667{
668 struct afs_call *call = container_of(work, struct afs_call, async_work);
669
670 call->async_workfn(work);
671}
672
673/* 677/*
674 * accept the backlog of incoming calls 678 * accept the backlog of incoming calls
675 */ 679 */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
790 _debug("oom"); 794 _debug("oom");
791 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); 795 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
792 default: 796 default:
793 rxrpc_kernel_end_call(call->rxcall); 797 afs_end_call(call);
794 call->rxcall = NULL;
795 call->type->destructor(call);
796 afs_free_call(call);
797 _leave(" [error]"); 798 _leave(" [error]");
798 return; 799 return;
799 } 800 }
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
823 call->state = AFS_CALL_AWAIT_ACK; 824 call->state = AFS_CALL_AWAIT_ACK;
824 n = rxrpc_kernel_send_data(call->rxcall, &msg, len); 825 n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
825 if (n >= 0) { 826 if (n >= 0) {
827 /* Success */
826 _leave(" [replied]"); 828 _leave(" [replied]");
827 return; 829 return;
828 } 830 }
831
829 if (n == -ENOMEM) { 832 if (n == -ENOMEM) {
830 _debug("oom"); 833 _debug("oom");
831 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); 834 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
832 } 835 }
833 rxrpc_kernel_end_call(call->rxcall); 836 afs_end_call(call);
834 call->rxcall = NULL;
835 call->type->destructor(call);
836 afs_free_call(call);
837 _leave(" [error]"); 837 _leave(" [error]");
838} 838}
839 839
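
The internal.h and rxrpc.c changes together swap the per-call work_func_t for a typed handler, void (*async_workfn)(struct afs_call *), dispatched through a single afs_async_workfn() trampoline that recovers the afs_call from its embedded work_struct. That container_of trampoline pattern, reduced to a standalone C sketch (structures and names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { void (*func)(struct work *); };

    struct call {
        int id;
        void (*workfn)(struct call *);  /* typed handler */
        struct work work;               /* embedded work item */
    };

    /* One trampoline: recover the container, invoke the typed handler. */
    static void trampoline(struct work *w)
    {
        struct call *c = container_of(w, struct call, work);
        c->workfn(c);
    }

    static void process(struct call *c) { printf("processing call %d\n", c->id); }

    int main(void)
    {
        struct call c = { .id = 1, .workfn = process,
                          .work  = { .func = trampoline } };
        c.work.func(&c.work);           /* what the workqueue would do */
        return 0;
    }

The workqueue machinery keeps seeing a plain work function, while call-site code gets a type-checked signature.
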
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2ad7de94efef..2f6d7b13b5bd 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3120,6 +3120,8 @@ process_slot:
3120 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 3120 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3121 u64 skip = 0; 3121 u64 skip = 0;
3122 u64 trim = 0; 3122 u64 trim = 0;
3123 u64 aligned_end = 0;
3124
3123 if (off > key.offset) { 3125 if (off > key.offset) {
3124 skip = off - key.offset; 3126 skip = off - key.offset;
3125 new_key.offset += skip; 3127 new_key.offset += skip;
@@ -3136,9 +3138,11 @@ process_slot:
3136 size -= skip + trim; 3138 size -= skip + trim;
3137 datal -= skip + trim; 3139 datal -= skip + trim;
3138 3140
3141 aligned_end = ALIGN(new_key.offset + datal,
3142 root->sectorsize);
3139 ret = btrfs_drop_extents(trans, root, inode, 3143 ret = btrfs_drop_extents(trans, root, inode,
3140 new_key.offset, 3144 new_key.offset,
3141 new_key.offset + datal, 3145 aligned_end,
3142 1); 3146 1);
3143 if (ret) { 3147 if (ret) {
3144 if (ret != -EOPNOTSUPP) 3148 if (ret != -EOPNOTSUPP)
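
The clone-range fix computes aligned_end by rounding the drop range up to the filesystem sector size before calling btrfs_drop_extents(), so cloning an inline extent cannot leave a stale tail in the destination's last partial block. The kernel's ALIGN() used here is a power-of-two round-up; a standalone equivalent:

    #include <assert.h>

    /* Round x up to a multiple of a; a must be a power of two. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        assert(ALIGN(4097, 4096) == 8192);  /* partial block rounds up */
        assert(ALIGN(4096, 4096) == 4096);  /* exact multiple unchanged */
        return 0;
    }
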
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index eb6537a08c1b..fd38b5053479 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1668,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
1668 goto out; 1668 goto out;
1669 } 1669 }
1670 1670
1671 if (key.type == BTRFS_INODE_REF_KEY) { 1671 if (found_key.type == BTRFS_INODE_REF_KEY) {
1672 struct btrfs_inode_ref *iref; 1672 struct btrfs_inode_ref *iref;
1673 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1673 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1674 struct btrfs_inode_ref); 1674 struct btrfs_inode_ref);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index aadc2b68678b..a22d667f1069 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1737,6 +1737,9 @@ cifs_inode_needs_reval(struct inode *inode)
1737 if (cifs_i->time == 0) 1737 if (cifs_i->time == 0)
1738 return true; 1738 return true;
1739 1739
1740 if (!cifs_sb->actimeo)
1741 return true;
1742
1740 if (!time_in_range(jiffies, cifs_i->time, 1743 if (!time_in_range(jiffies, cifs_i->time,
1741 cifs_i->time + cifs_sb->actimeo)) 1744 cifs_i->time + cifs_sb->actimeo))
1742 return true; 1745 return true;
diff --git a/fs/dcache.c b/fs/dcache.c
index 42ae01eefc07..be2bea834bf4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -441,42 +441,12 @@ void d_drop(struct dentry *dentry)
441} 441}
442EXPORT_SYMBOL(d_drop); 442EXPORT_SYMBOL(d_drop);
443 443
444/* 444static void __dentry_kill(struct dentry *dentry)
445 * Finish off a dentry we've decided to kill.
446 * dentry->d_lock must be held, returns with it unlocked.
447 * If ref is non-zero, then decrement the refcount too.
448 * Returns dentry requiring refcount drop, or NULL if we're done.
449 */
450static struct dentry *
451dentry_kill(struct dentry *dentry, int unlock_on_failure)
452 __releases(dentry->d_lock)
453{ 445{
454 struct inode *inode;
455 struct dentry *parent = NULL; 446 struct dentry *parent = NULL;
456 bool can_free = true; 447 bool can_free = true;
457
458 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
459 can_free = dentry->d_flags & DCACHE_MAY_FREE;
460 spin_unlock(&dentry->d_lock);
461 goto out;
462 }
463
464 inode = dentry->d_inode;
465 if (inode && !spin_trylock(&inode->i_lock)) {
466relock:
467 if (unlock_on_failure) {
468 spin_unlock(&dentry->d_lock);
469 cpu_relax();
470 }
471 return dentry; /* try again with same dentry */
472 }
473 if (!IS_ROOT(dentry)) 448 if (!IS_ROOT(dentry))
474 parent = dentry->d_parent; 449 parent = dentry->d_parent;
475 if (parent && !spin_trylock(&parent->d_lock)) {
476 if (inode)
477 spin_unlock(&inode->i_lock);
478 goto relock;
479 }
480 450
481 /* 451 /*
482 * The dentry is now unrecoverably dead to the world. 452 * The dentry is now unrecoverably dead to the world.
@@ -520,9 +490,72 @@ relock:
520 can_free = false; 490 can_free = false;
521 } 491 }
522 spin_unlock(&dentry->d_lock); 492 spin_unlock(&dentry->d_lock);
523out:
524 if (likely(can_free)) 493 if (likely(can_free))
525 dentry_free(dentry); 494 dentry_free(dentry);
495}
496
497/*
498 * Finish off a dentry we've decided to kill.
499 * dentry->d_lock must be held, returns with it unlocked.
500 * If ref is non-zero, then decrement the refcount too.
501 * Returns dentry requiring refcount drop, or NULL if we're done.
502 */
503static struct dentry *dentry_kill(struct dentry *dentry)
504 __releases(dentry->d_lock)
505{
506 struct inode *inode = dentry->d_inode;
507 struct dentry *parent = NULL;
508
509 if (inode && unlikely(!spin_trylock(&inode->i_lock)))
510 goto failed;
511
512 if (!IS_ROOT(dentry)) {
513 parent = dentry->d_parent;
514 if (unlikely(!spin_trylock(&parent->d_lock))) {
515 if (inode)
516 spin_unlock(&inode->i_lock);
517 goto failed;
518 }
519 }
520
521 __dentry_kill(dentry);
522 return parent;
523
524failed:
525 spin_unlock(&dentry->d_lock);
526 cpu_relax();
527 return dentry; /* try again with same dentry */
528}
529
530static inline struct dentry *lock_parent(struct dentry *dentry)
531{
532 struct dentry *parent = dentry->d_parent;
533 if (IS_ROOT(dentry))
534 return NULL;
535 if (likely(spin_trylock(&parent->d_lock)))
536 return parent;
537 spin_unlock(&dentry->d_lock);
538 rcu_read_lock();
539again:
540 parent = ACCESS_ONCE(dentry->d_parent);
541 spin_lock(&parent->d_lock);
542 /*
543 * We can't blindly lock dentry until we are sure
544 * that we won't violate the locking order.
545 * Any changes of dentry->d_parent must have
546 * been done with parent->d_lock held, so
547 * spin_lock() above is enough of a barrier
548 * for checking if it's still our child.
549 */
550 if (unlikely(parent != dentry->d_parent)) {
551 spin_unlock(&parent->d_lock);
552 goto again;
553 }
554 rcu_read_unlock();
555 if (parent != dentry)
556 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
557 else
558 parent = NULL;
526 return parent; 559 return parent;
527} 560}
528 561
@@ -579,7 +612,7 @@ repeat:
579 return; 612 return;
580 613
581kill_it: 614kill_it:
582 dentry = dentry_kill(dentry, 1); 615 dentry = dentry_kill(dentry);
583 if (dentry) 616 if (dentry)
584 goto repeat; 617 goto repeat;
585} 618}
@@ -797,8 +830,11 @@ static void shrink_dentry_list(struct list_head *list)
797 struct dentry *dentry, *parent; 830 struct dentry *dentry, *parent;
798 831
799 while (!list_empty(list)) { 832 while (!list_empty(list)) {
833 struct inode *inode;
800 dentry = list_entry(list->prev, struct dentry, d_lru); 834 dentry = list_entry(list->prev, struct dentry, d_lru);
801 spin_lock(&dentry->d_lock); 835 spin_lock(&dentry->d_lock);
836 parent = lock_parent(dentry);
837
802 /* 838 /*
803 * The dispose list is isolated and dentries are not accounted 839 * The dispose list is isolated and dentries are not accounted
804 * to the LRU here, so we can simply remove it from the list 840 * to the LRU here, so we can simply remove it from the list
@@ -812,26 +848,33 @@ static void shrink_dentry_list(struct list_head *list)
812 */ 848 */
813 if ((int)dentry->d_lockref.count > 0) { 849 if ((int)dentry->d_lockref.count > 0) {
814 spin_unlock(&dentry->d_lock); 850 spin_unlock(&dentry->d_lock);
851 if (parent)
852 spin_unlock(&parent->d_lock);
815 continue; 853 continue;
816 } 854 }
817 855
818 parent = dentry_kill(dentry, 0); 856
819 /* 857 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
820 * If dentry_kill returns NULL, we have nothing more to do. 858 bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
821 */ 859 spin_unlock(&dentry->d_lock);
822 if (!parent) 860 if (parent)
861 spin_unlock(&parent->d_lock);
862 if (can_free)
863 dentry_free(dentry);
823 continue; 864 continue;
865 }
824 866
825 if (unlikely(parent == dentry)) { 867 inode = dentry->d_inode;
826 /* 868 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
827 * trylocks have failed and d_lock has been held the
828 * whole time, so it could not have been added to any
829 * other lists. Just add it back to the shrink list.
830 */
831 d_shrink_add(dentry, list); 869 d_shrink_add(dentry, list);
832 spin_unlock(&dentry->d_lock); 870 spin_unlock(&dentry->d_lock);
871 if (parent)
872 spin_unlock(&parent->d_lock);
833 continue; 873 continue;
834 } 874 }
875
876 __dentry_kill(dentry);
877
835 /* 878 /*
836 * We need to prune ancestors too. This is necessary to prevent 879 * We need to prune ancestors too. This is necessary to prevent
837 * quadratic behavior of shrink_dcache_parent(), but is also 880 * quadratic behavior of shrink_dcache_parent(), but is also
@@ -839,8 +882,26 @@ static void shrink_dentry_list(struct list_head *list)
839 * fragmentation. 882 * fragmentation.
840 */ 883 */
841 dentry = parent; 884 dentry = parent;
842 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) 885 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
843 dentry = dentry_kill(dentry, 1); 886 parent = lock_parent(dentry);
887 if (dentry->d_lockref.count != 1) {
888 dentry->d_lockref.count--;
889 spin_unlock(&dentry->d_lock);
890 if (parent)
891 spin_unlock(&parent->d_lock);
892 break;
893 }
894 inode = dentry->d_inode; /* can't be NULL */
895 if (unlikely(!spin_trylock(&inode->i_lock))) {
896 spin_unlock(&dentry->d_lock);
897 if (parent)
898 spin_unlock(&parent->d_lock);
899 cpu_relax();
900 continue;
901 }
902 __dentry_kill(dentry);
903 dentry = parent;
904 }
844 } 905 }
845} 906}
846 907
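
The dcache rework splits the old dentry_kill() into __dentry_kill() (the unconditional teardown) plus a retrying dentry_kill(), and adds lock_parent(), which encodes the lock-ordering rule that a parent's d_lock comes before its child's: try the parent opportunistically, and on failure drop the child's lock, take both in the correct order, then verify the parent didn't change in between. A simplified pthread sketch of that retry-and-revalidate shape (no RCU here, and roots use a NULL parent rather than the kernel's self-parenting; the kernel re-reads d_parent with ACCESS_ONCE under rcu_read_lock()):

    #include <pthread.h>
    #include <stddef.h>

    struct node {
        pthread_mutex_t lock;
        struct node *parent;        /* NULL for a root */
    };

    /* Called with n->lock held; returns with the parent's lock (taken
     * first, per the lock order) and n->lock both held. */
    static struct node *lock_parent(struct node *n)
    {
        struct node *parent = n->parent;

        if (!parent)
            return NULL;
        if (pthread_mutex_trylock(&parent->lock) == 0)
            return parent;              /* fast path: no ordering violation */
    again:
        pthread_mutex_unlock(&n->lock);
        parent = n->parent;             /* racy here; RCU-safe in the kernel */
        pthread_mutex_lock(&parent->lock);
        pthread_mutex_lock(&n->lock);
        if (parent != n->parent) {      /* reparented while we were unlocked */
            pthread_mutex_unlock(&parent->lock);
            goto again;
        }
        return parent;
    }

    int main(void)
    {
        struct node root  = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct node child = { PTHREAD_MUTEX_INITIALIZER, &root };
        struct node *p;

        pthread_mutex_lock(&child.lock);
        p = lock_parent(&child);
        if (p)
            pthread_mutex_unlock(&p->lock);
        pthread_mutex_unlock(&child.lock);
        return 0;
    }
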
diff --git a/fs/exec.c b/fs/exec.c
index 476f3ebf437e..238b7aa26f68 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -657,10 +657,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
657 unsigned long rlim_stack; 657 unsigned long rlim_stack;
658 658
659#ifdef CONFIG_STACK_GROWSUP 659#ifdef CONFIG_STACK_GROWSUP
660 /* Limit stack size to 1GB */ 660 /* Limit stack size */
661 stack_base = rlimit_max(RLIMIT_STACK); 661 stack_base = rlimit_max(RLIMIT_STACK);
662 if (stack_base > (1 << 30)) 662 if (stack_base > STACK_SIZE_MAX)
663 stack_base = 1 << 30; 663 stack_base = STACK_SIZE_MAX;
664 664
665 /* Make sure we didn't let the argument array grow too large. */ 665 /* Make sure we didn't let the argument array grow too large. */
666 if (vma->vm_end - vma->vm_start > stack_base) 666 if (vma->vm_end - vma->vm_start > stack_base)
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e01ea4a14a01..5e9a80cfc3d8 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -610,6 +610,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
610static int kernfs_fop_open(struct inode *inode, struct file *file) 610static int kernfs_fop_open(struct inode *inode, struct file *file)
611{ 611{
612 struct kernfs_node *kn = file->f_path.dentry->d_fsdata; 612 struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
613 struct kernfs_root *root = kernfs_root(kn);
613 const struct kernfs_ops *ops; 614 const struct kernfs_ops *ops;
614 struct kernfs_open_file *of; 615 struct kernfs_open_file *of;
615 bool has_read, has_write, has_mmap; 616 bool has_read, has_write, has_mmap;
@@ -624,14 +625,16 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
624 has_write = ops->write || ops->mmap; 625 has_write = ops->write || ops->mmap;
625 has_mmap = ops->mmap; 626 has_mmap = ops->mmap;
626 627
627 /* check perms and supported operations */ 628 /* see the flag definition for details */
628 if ((file->f_mode & FMODE_WRITE) && 629 if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
629 (!(inode->i_mode & S_IWUGO) || !has_write)) 630 if ((file->f_mode & FMODE_WRITE) &&
630 goto err_out; 631 (!(inode->i_mode & S_IWUGO) || !has_write))
632 goto err_out;
631 633
632 if ((file->f_mode & FMODE_READ) && 634 if ((file->f_mode & FMODE_READ) &&
633 (!(inode->i_mode & S_IRUGO) || !has_read)) 635 (!(inode->i_mode & S_IRUGO) || !has_read))
634 goto err_out; 636 goto err_out;
637 }
635 638
636 /* allocate a kernfs_open_file for the file */ 639 /* allocate a kernfs_open_file for the file */
637 error = -ENOMEM; 640 error = -ENOMEM;
diff --git a/fs/locks.c b/fs/locks.c
index e663aeac579e..e390bd9ae068 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -389,18 +389,6 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
389 fl->fl_ops = NULL; 389 fl->fl_ops = NULL;
390 fl->fl_lmops = NULL; 390 fl->fl_lmops = NULL;
391 391
392 /* Ensure that fl->fl_filp has compatible f_mode */
393 switch (l->l_type) {
394 case F_RDLCK:
395 if (!(filp->f_mode & FMODE_READ))
396 return -EBADF;
397 break;
398 case F_WRLCK:
399 if (!(filp->f_mode & FMODE_WRITE))
400 return -EBADF;
401 break;
402 }
403
404 return assign_type(fl, l->l_type); 392 return assign_type(fl, l->l_type);
405} 393}
406 394
@@ -2034,6 +2022,22 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2034 return error; 2022 return error;
2035} 2023}
2036 2024
2025/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */
2026static int
2027check_fmode_for_setlk(struct file_lock *fl)
2028{
2029 switch (fl->fl_type) {
2030 case F_RDLCK:
2031 if (!(fl->fl_file->f_mode & FMODE_READ))
2032 return -EBADF;
2033 break;
2034 case F_WRLCK:
2035 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2036 return -EBADF;
2037 }
2038 return 0;
2039}
2040
2037/* Apply the lock described by l to an open file descriptor. 2041/* Apply the lock described by l to an open file descriptor.
2038 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 2042 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2039 */ 2043 */
@@ -2071,6 +2075,10 @@ again:
2071 if (error) 2075 if (error)
2072 goto out; 2076 goto out;
2073 2077
2078 error = check_fmode_for_setlk(file_lock);
2079 if (error)
2080 goto out;
2081
2074 /* 2082 /*
2075 * If the cmd is requesting file-private locks, then set the 2083 * If the cmd is requesting file-private locks, then set the
2076 * FL_OFDLCK flag and override the owner. 2084 * FL_OFDLCK flag and override the owner.
@@ -2206,6 +2214,10 @@ again:
2206 if (error) 2214 if (error)
2207 goto out; 2215 goto out;
2208 2216
2217 error = check_fmode_for_setlk(file_lock);
2218 if (error)
2219 goto out;
2220
2209 /* 2221 /*
2210 * If the cmd is requesting file-private locks, then set the 2222 * If the cmd is requesting file-private locks, then set the
2211 * FL_OFDLCK flag and override the owner. 2223 * FL_OFDLCK flag and override the owner.
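
Relocating the mode check into check_fmode_for_setlk() matters because flock64_to_posix_lock() is also used for F_GETLK, where the open mode of the descriptor is irrelevant to the query; only F_SETLK/F_SETLKW should refuse a lock type the descriptor cannot back. From userspace the asymmetry looks like this (the path is illustrative):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
        int fd = open("/etc/hostname", O_RDONLY);   /* read-only descriptor */

        if (fd < 0)
            return 1;

        /* F_GETLK ignores the open mode: querying a write lock is fine. */
        if (fcntl(fd, F_GETLK, &fl) == 0)
            printf("F_GETLK ok (l_type now %d)\n", fl.l_type);

        /* F_SETLK enforces it: a write lock on a read-only fd gives EBADF. */
        fl.l_type = F_WRLCK;
        if (fcntl(fd, F_SETLK, &fl) < 0)
            printf("F_SETLK: %s\n", strerror(errno));
        return 0;
    }
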
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 6f3f392d48af..f66c66b9f182 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -402,8 +402,10 @@ sort_pacl(struct posix_acl *pacl)
402 * by uid/gid. */ 402 * by uid/gid. */
403 int i, j; 403 int i, j;
404 404
405 if (pacl->a_count <= 4) 405 /* no users or groups */
406 return; /* no users or groups */ 406 if (!pacl || pacl->a_count <= 4)
407 return;
408
407 i = 1; 409 i = 1;
408 while (pacl->a_entries[i].e_tag == ACL_USER) 410 while (pacl->a_entries[i].e_tag == ACL_USER)
409 i++; 411 i++;
@@ -530,13 +532,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
530 532
531 /* 533 /*
532 * ACLs with no ACEs are treated differently in the inheritable 534 * ACLs with no ACEs are treated differently in the inheritable
533 * and effective cases: when there are no inheritable ACEs, we 535 * and effective cases: when there are no inheritable ACEs,
534 * set a zero-length default posix acl: 536 * calls ->set_acl with a NULL ACL structure.
535 */ 537 */
536 if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) { 538 if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
537 pacl = posix_acl_alloc(0, GFP_KERNEL); 539 return NULL;
538 return pacl ? pacl : ERR_PTR(-ENOMEM); 540
539 }
540 /* 541 /*
541 * When there are no effective ACEs, the following will end 542 * When there are no effective ACEs, the following will end
542 * up setting a 3-element effective posix ACL with all 543 * up setting a 3-element effective posix ACL with all
@@ -589,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
589 add_to_mask(state, &state->groups->aces[i].perms); 590 add_to_mask(state, &state->groups->aces[i].perms);
590 } 591 }
591 592
592 if (!state->users->n && !state->groups->n) { 593 if (state->users->n || state->groups->n) {
593 pace++; 594 pace++;
594 pace->e_tag = ACL_MASK; 595 pace->e_tag = ACL_MASK;
595 low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags); 596 low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3ba65979a3cd..9a77a5a21557 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1078,6 +1078,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
1078 return NULL; 1078 return NULL;
1079 } 1079 }
1080 clp->cl_name.len = name.len; 1080 clp->cl_name.len = name.len;
1081 INIT_LIST_HEAD(&clp->cl_sessions);
1082 idr_init(&clp->cl_stateids);
1083 atomic_set(&clp->cl_refcount, 0);
1084 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1085 INIT_LIST_HEAD(&clp->cl_idhash);
1086 INIT_LIST_HEAD(&clp->cl_openowners);
1087 INIT_LIST_HEAD(&clp->cl_delegations);
1088 INIT_LIST_HEAD(&clp->cl_lru);
1089 INIT_LIST_HEAD(&clp->cl_callbacks);
1090 INIT_LIST_HEAD(&clp->cl_revoked);
1091 spin_lock_init(&clp->cl_lock);
1092 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1081 return clp; 1093 return clp;
1082} 1094}
1083 1095
@@ -1095,6 +1107,7 @@ free_client(struct nfs4_client *clp)
1095 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 1107 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1096 free_session(ses); 1108 free_session(ses);
1097 } 1109 }
1110 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1098 free_svc_cred(&clp->cl_cred); 1111 free_svc_cred(&clp->cl_cred);
1099 kfree(clp->cl_name.data); 1112 kfree(clp->cl_name.data);
1100 idr_destroy(&clp->cl_stateids); 1113 idr_destroy(&clp->cl_stateids);
@@ -1347,7 +1360,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
1347 if (clp == NULL) 1360 if (clp == NULL)
1348 return NULL; 1361 return NULL;
1349 1362
1350 INIT_LIST_HEAD(&clp->cl_sessions);
1351 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 1363 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1352 if (ret) { 1364 if (ret) {
1353 spin_lock(&nn->client_lock); 1365 spin_lock(&nn->client_lock);
@@ -1355,20 +1367,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
1355 spin_unlock(&nn->client_lock); 1367 spin_unlock(&nn->client_lock);
1356 return NULL; 1368 return NULL;
1357 } 1369 }
1358 idr_init(&clp->cl_stateids);
1359 atomic_set(&clp->cl_refcount, 0);
1360 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1361 INIT_LIST_HEAD(&clp->cl_idhash);
1362 INIT_LIST_HEAD(&clp->cl_openowners);
1363 INIT_LIST_HEAD(&clp->cl_delegations);
1364 INIT_LIST_HEAD(&clp->cl_lru);
1365 INIT_LIST_HEAD(&clp->cl_callbacks);
1366 INIT_LIST_HEAD(&clp->cl_revoked);
1367 spin_lock_init(&clp->cl_lock);
1368 nfsd4_init_callback(&clp->cl_cb_null); 1370 nfsd4_init_callback(&clp->cl_cb_null);
1369 clp->cl_time = get_seconds(); 1371 clp->cl_time = get_seconds();
1370 clear_bit(0, &clp->cl_cb_slot_busy); 1372 clear_bit(0, &clp->cl_cb_slot_busy);
1371 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1372 copy_verf(clp, verf); 1373 copy_verf(clp, verf);
1373 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 1374 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1374 gen_confirm(clp); 1375 gen_confirm(clp);
@@ -3716,9 +3717,16 @@ out:
3716static __be32 3717static __be32
3717nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) 3718nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3718{ 3719{
3719 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) 3720 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
3721
3722 if (check_for_locks(stp->st_file, lo))
3720 return nfserr_locks_held; 3723 return nfserr_locks_held;
3721 release_lock_stateid(stp); 3724 /*
3725 * Currently there's a 1-1 lock stateid<->lockowner
3726 * correspondance, and we have to delete the lockowner when we
3727 * delete the lock stateid:
3728 */
3729 unhash_lockowner(lo);
3722 return nfs_ok; 3730 return nfs_ok;
3723} 3731}
3724 3732
@@ -4158,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
4158 4166
4159 if (!same_owner_str(&lo->lo_owner, owner, clid)) 4167 if (!same_owner_str(&lo->lo_owner, owner, clid))
4160 return false; 4168 return false;
4169 if (list_empty(&lo->lo_owner.so_stateids)) {
4170 WARN_ON_ONCE(1);
4171 return false;
4172 }
4161 lst = list_first_entry(&lo->lo_owner.so_stateids, 4173 lst = list_first_entry(&lo->lo_owner.so_stateids,
4162 struct nfs4_ol_stateid, st_perstateowner); 4174 struct nfs4_ol_stateid, st_perstateowner);
4163 return lst->st_file->fi_inode == inode; 4175 return lst->st_file->fi_inode == inode;
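
Hoisting the list, idr, and waitqueue initialisation from create_client() into alloc_client() pairs it with the new rpc_destroy_wait_queue() call in free_client(): every client free_client() can ever see, including ones discarded on create_client()'s copy_cred() error path, now has all the state the destructor tears down. The alloc/free pairing rule in miniature (names illustrative):

    #include <stdlib.h>

    struct client { int *stateids; };   /* stand-in for idr, lists, waitq */

    static struct client *alloc_client(void)
    {
        struct client *c = calloc(1, sizeof(*c));

        if (!c)
            return NULL;
        /* Initialise everything free_client() tears down, before any
         * caller can reach an error path that frees the client. */
        c->stateids = calloc(16, sizeof(int));
        if (!c->stateids) {
            free(c);
            return NULL;
        }
        return c;
    }

    static void free_client(struct client *c)
    {
        free(c->stateids);  /* always valid: set up in alloc_client() */
        free(c);
    }

    int main(void)
    {
        struct client *c = alloc_client();

        if (c)
            free_client(c);
        return 0;
    }
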
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index af3f7aa73e13..ee1f88419cb0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -472,11 +472,15 @@ bail:
472 472
473void dlm_destroy_master_caches(void) 473void dlm_destroy_master_caches(void)
474{ 474{
475 if (dlm_lockname_cache) 475 if (dlm_lockname_cache) {
476 kmem_cache_destroy(dlm_lockname_cache); 476 kmem_cache_destroy(dlm_lockname_cache);
477 dlm_lockname_cache = NULL;
478 }
477 479
478 if (dlm_lockres_cache) 480 if (dlm_lockres_cache) {
479 kmem_cache_destroy(dlm_lockres_cache); 481 kmem_cache_destroy(dlm_lockres_cache);
482 dlm_lockres_cache = NULL;
483 }
480} 484}
481 485
482static void dlm_lockres_release(struct kref *kref) 486static void dlm_lockres_release(struct kref *kref)
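
Nulling dlm_lockname_cache and dlm_lockres_cache after kmem_cache_destroy() makes dlm_destroy_master_caches() idempotent: a second call (say, when an init error path overlaps normal teardown) finds NULL and skips the destroy instead of freeing twice. The free-and-NULL idiom in plain C:

    #include <stdlib.h>

    static int *lockname_cache;

    static void destroy_caches(void)
    {
        if (lockname_cache) {
            free(lockname_cache);
            lockname_cache = NULL;  /* destroy_caches() is now re-callable */
        }
    }

    int main(void)
    {
        lockname_cache = malloc(64);
        destroy_caches();
        destroy_caches();           /* second call is harmless */
        return 0;
    }
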
diff --git a/fs/splice.c b/fs/splice.c
index 8e7eef755a9b..f5cb9ba84510 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1548,7 +1548,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
1548 struct iovec iovstack[UIO_FASTIOV]; 1548 struct iovec iovstack[UIO_FASTIOV];
1549 struct iovec *iov = iovstack; 1549 struct iovec *iov = iovstack;
1550 struct iov_iter iter; 1550 struct iov_iter iter;
1551 ssize_t count = 0; 1551 ssize_t count;
1552 1552
1553 pipe = get_pipe_info(file); 1553 pipe = get_pipe_info(file);
1554 if (!pipe) 1554 if (!pipe)
@@ -1557,8 +1557,9 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
1557 ret = rw_copy_check_uvector(READ, uiov, nr_segs, 1557 ret = rw_copy_check_uvector(READ, uiov, nr_segs,
1558 ARRAY_SIZE(iovstack), iovstack, &iov); 1558 ARRAY_SIZE(iovstack), iovstack, &iov);
1559 if (ret <= 0) 1559 if (ret <= 0)
1560 return ret; 1560 goto out;
1561 1561
1562 count = ret;
1562 iov_iter_init(&iter, READ, iov, nr_segs, count); 1563 iov_iter_init(&iter, READ, iov, nr_segs, count);
1563 1564
1564 sd.len = 0; 1565 sd.len = 0;
@@ -1571,6 +1572,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
1571 ret = __splice_from_pipe(pipe, &sd, pipe_to_user); 1572 ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
1572 pipe_unlock(pipe); 1573 pipe_unlock(pipe);
1573 1574
1575out:
1574 if (iov != iovstack) 1576 if (iov != iovstack)
1575 kfree(iov); 1577 kfree(iov);
1576 1578
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 28cc1acd5439..e9ef59b3abb1 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
47 ssize_t count; 47 ssize_t count;
48 char *buf; 48 char *buf;
49 49
50 /* acquire buffer and ensure that it's >= PAGE_SIZE */ 50 /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */
51 count = seq_get_buf(sf, &buf); 51 count = seq_get_buf(sf, &buf);
52 if (count < PAGE_SIZE) { 52 if (count < PAGE_SIZE) {
53 seq_commit(sf, -1); 53 seq_commit(sf, -1);
54 return 0; 54 return 0;
55 } 55 }
56 memset(buf, 0, PAGE_SIZE);
56 57
57 /* 58 /*
58 * Invoke show(). Control may reach here via seq file lseek even 59 * Invoke show(). Control may reach here via seq file lseek even
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index a66ad6196f59..8794423f7efb 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -63,7 +63,8 @@ int __init sysfs_init(void)
63{ 63{
64 int err; 64 int err;
65 65
66 sysfs_root = kernfs_create_root(NULL, 0, NULL); 66 sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
67 NULL);
67 if (IS_ERR(sysfs_root)) 68 if (IS_ERR(sysfs_root))
68 return PTR_ERR(sysfs_root); 69 return PTR_ERR(sysfs_root);
69 70
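
The kernfs and sysfs hunks are two halves of one change: kernfs_fop_open() now applies the i_mode-versus-handler cross-checks only when the root was created with KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, and sysfs passes that flag at kernfs_create_root() so its behaviour is unchanged while other kernfs users can skip the extra checks. Flag-gated policy at a shared chokepoint, sketched with illustrative names:

    #include <stdio.h>

    #define ROOT_EXTRA_OPEN_PERM_CHECK 0x1

    struct root { unsigned int flags; };

    static int do_open(const struct root *r, int want_write, int mode_allows_write)
    {
        /* Extra checks only for roots that asked for them. */
        if (r->flags & ROOT_EXTRA_OPEN_PERM_CHECK) {
            if (want_write && !mode_allows_write)
                return -1;          /* kernfs returns -EACCES here */
        }
        return 0;
    }

    int main(void)
    {
        struct root sysfs_like = { .flags = ROOT_EXTRA_OPEN_PERM_CHECK };
        struct root other      = { .flags = 0 };

        printf("sysfs-like: %d, other: %d\n",
               do_open(&sysfs_like, 1, 0), do_open(&other, 1, 0));
        return 0;
    }
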
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 01b6a0102fbd..abda1124a70f 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -213,7 +213,7 @@ xfs_attr_calc_size(
213 * Out of line attribute, cannot double split, but 213 * Out of line attribute, cannot double split, but
214 * make room for the attribute value itself. 214 * make room for the attribute value itself.
215 */ 215 */
216 uint dblocks = XFS_B_TO_FSB(mp, valuelen); 216 uint dblocks = xfs_attr3_rmt_blocks(mp, valuelen);
217 nblks += dblocks; 217 nblks += dblocks;
218 nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); 218 nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
219 } 219 }
@@ -698,11 +698,22 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
698 698
699 trace_xfs_attr_leaf_replace(args); 699 trace_xfs_attr_leaf_replace(args);
700 700
701 /* save the attribute state for later removal*/
701 args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */ 702 args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */
702 args->blkno2 = args->blkno; /* set 2nd entry info*/ 703 args->blkno2 = args->blkno; /* set 2nd entry info*/
703 args->index2 = args->index; 704 args->index2 = args->index;
704 args->rmtblkno2 = args->rmtblkno; 705 args->rmtblkno2 = args->rmtblkno;
705 args->rmtblkcnt2 = args->rmtblkcnt; 706 args->rmtblkcnt2 = args->rmtblkcnt;
707 args->rmtvaluelen2 = args->rmtvaluelen;
708
709 /*
710 * clear the remote attr state now that it is saved so that the
711 * values reflect the state of the attribute we are about to
712 * add, not the attribute we just found and will remove later.
713 */
714 args->rmtblkno = 0;
715 args->rmtblkcnt = 0;
716 args->rmtvaluelen = 0;
706 } 717 }
707 718
708 /* 719 /*
@@ -794,6 +805,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
794 args->blkno = args->blkno2; 805 args->blkno = args->blkno2;
795 args->rmtblkno = args->rmtblkno2; 806 args->rmtblkno = args->rmtblkno2;
796 args->rmtblkcnt = args->rmtblkcnt2; 807 args->rmtblkcnt = args->rmtblkcnt2;
808 args->rmtvaluelen = args->rmtvaluelen2;
797 if (args->rmtblkno) { 809 if (args->rmtblkno) {
798 error = xfs_attr_rmtval_remove(args); 810 error = xfs_attr_rmtval_remove(args);
799 if (error) 811 if (error)
@@ -999,13 +1011,22 @@ restart:
999 1011
1000 trace_xfs_attr_node_replace(args); 1012 trace_xfs_attr_node_replace(args);
1001 1013
1014 /* save the attribute state for later removal*/
1002 args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ 1015 args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */
1003 args->blkno2 = args->blkno; /* set 2nd entry info*/ 1016 args->blkno2 = args->blkno; /* set 2nd entry info*/
1004 args->index2 = args->index; 1017 args->index2 = args->index;
1005 args->rmtblkno2 = args->rmtblkno; 1018 args->rmtblkno2 = args->rmtblkno;
1006 args->rmtblkcnt2 = args->rmtblkcnt; 1019 args->rmtblkcnt2 = args->rmtblkcnt;
1020 args->rmtvaluelen2 = args->rmtvaluelen;
1021
1022 /*
1023 * clear the remote attr state now that it is saved so that the
1024 * values reflect the state of the attribute we are about to
1025 * add, not the attribute we just found and will remove later.
1026 */
1007 args->rmtblkno = 0; 1027 args->rmtblkno = 0;
1008 args->rmtblkcnt = 0; 1028 args->rmtblkcnt = 0;
1029 args->rmtvaluelen = 0;
1009 } 1030 }
1010 1031
1011 retval = xfs_attr3_leaf_add(blk->bp, state->args); 1032 retval = xfs_attr3_leaf_add(blk->bp, state->args);
@@ -1133,6 +1154,7 @@ restart:
1133 args->blkno = args->blkno2; 1154 args->blkno = args->blkno2;
1134 args->rmtblkno = args->rmtblkno2; 1155 args->rmtblkno = args->rmtblkno2;
1135 args->rmtblkcnt = args->rmtblkcnt2; 1156 args->rmtblkcnt = args->rmtblkcnt2;
1157 args->rmtvaluelen = args->rmtvaluelen2;
1136 if (args->rmtblkno) { 1158 if (args->rmtblkno) {
1137 error = xfs_attr_rmtval_remove(args); 1159 error = xfs_attr_rmtval_remove(args);
1138 if (error) 1160 if (error)
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index fe9587fab17a..511c283459b1 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -1229,6 +1229,7 @@ xfs_attr3_leaf_add_work(
1229 name_rmt->valueblk = 0; 1229 name_rmt->valueblk = 0;
1230 args->rmtblkno = 1; 1230 args->rmtblkno = 1;
1231 args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); 1231 args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
1232 args->rmtvaluelen = args->valuelen;
1232 } 1233 }
1233 xfs_trans_log_buf(args->trans, bp, 1234 xfs_trans_log_buf(args->trans, bp,
1234 XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index), 1235 XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -2167,11 +2168,11 @@ xfs_attr3_leaf_lookup_int(
2167 if (!xfs_attr_namesp_match(args->flags, entry->flags)) 2168 if (!xfs_attr_namesp_match(args->flags, entry->flags))
2168 continue; 2169 continue;
2169 args->index = probe; 2170 args->index = probe;
2170 args->valuelen = be32_to_cpu(name_rmt->valuelen); 2171 args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
2171 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2172 args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2172 args->rmtblkcnt = xfs_attr3_rmt_blocks( 2173 args->rmtblkcnt = xfs_attr3_rmt_blocks(
2173 args->dp->i_mount, 2174 args->dp->i_mount,
2174 args->valuelen); 2175 args->rmtvaluelen);
2175 return XFS_ERROR(EEXIST); 2176 return XFS_ERROR(EEXIST);
2176 } 2177 }
2177 } 2178 }
@@ -2220,19 +2221,19 @@ xfs_attr3_leaf_getvalue(
2220 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); 2221 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2221 ASSERT(name_rmt->namelen == args->namelen); 2222 ASSERT(name_rmt->namelen == args->namelen);
2222 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); 2223 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
2223 valuelen = be32_to_cpu(name_rmt->valuelen); 2224 args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
2224 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2225 args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2225 args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount, 2226 args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
2226 valuelen); 2227 args->rmtvaluelen);
2227 if (args->flags & ATTR_KERNOVAL) { 2228 if (args->flags & ATTR_KERNOVAL) {
2228 args->valuelen = valuelen; 2229 args->valuelen = args->rmtvaluelen;
2229 return 0; 2230 return 0;
2230 } 2231 }
2231 if (args->valuelen < valuelen) { 2232 if (args->valuelen < args->rmtvaluelen) {
2232 args->valuelen = valuelen; 2233 args->valuelen = args->rmtvaluelen;
2233 return XFS_ERROR(ERANGE); 2234 return XFS_ERROR(ERANGE);
2234 } 2235 }
2235 args->valuelen = valuelen; 2236 args->valuelen = args->rmtvaluelen;
2236 } 2237 }
2237 return 0; 2238 return 0;
2238} 2239}
@@ -2519,7 +2520,7 @@ xfs_attr3_leaf_clearflag(
2519 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); 2520 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
2520 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); 2521 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2521 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2522 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2522 name_rmt->valuelen = cpu_to_be32(args->valuelen); 2523 name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
2523 xfs_trans_log_buf(args->trans, bp, 2524 xfs_trans_log_buf(args->trans, bp,
2524 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); 2525 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2525 } 2526 }
@@ -2677,7 +2678,7 @@ xfs_attr3_leaf_flipflags(
2677 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); 2678 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
2678 name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index); 2679 name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
2679 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2680 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2680 name_rmt->valuelen = cpu_to_be32(args->valuelen); 2681 name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
2681 xfs_trans_log_buf(args->trans, bp1, 2682 xfs_trans_log_buf(args->trans, bp1,
2682 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); 2683 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
2683 } 2684 }
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 01db96f60cf0..833fe5d98d80 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -447,6 +447,7 @@ xfs_attr3_leaf_list_int(
447 args.dp = context->dp; 447 args.dp = context->dp;
448 args.whichfork = XFS_ATTR_FORK; 448 args.whichfork = XFS_ATTR_FORK;
449 args.valuelen = valuelen; 449 args.valuelen = valuelen;
450 args.rmtvaluelen = valuelen;
450 args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); 451 args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
451 args.rmtblkno = be32_to_cpu(name_rmt->valueblk); 452 args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
452 args.rmtblkcnt = xfs_attr3_rmt_blocks( 453 args.rmtblkcnt = xfs_attr3_rmt_blocks(
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
index 6e37823e2932..d2e6e948cec7 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/xfs_attr_remote.c
@@ -337,7 +337,7 @@ xfs_attr_rmtval_get(
337 struct xfs_buf *bp; 337 struct xfs_buf *bp;
338 xfs_dablk_t lblkno = args->rmtblkno; 338 xfs_dablk_t lblkno = args->rmtblkno;
339 __uint8_t *dst = args->value; 339 __uint8_t *dst = args->value;
340 int valuelen = args->valuelen; 340 int valuelen;
341 int nmap; 341 int nmap;
342 int error; 342 int error;
343 int blkcnt = args->rmtblkcnt; 343 int blkcnt = args->rmtblkcnt;
@@ -347,7 +347,9 @@ xfs_attr_rmtval_get(
347 trace_xfs_attr_rmtval_get(args); 347 trace_xfs_attr_rmtval_get(args);
348 348
349 ASSERT(!(args->flags & ATTR_KERNOVAL)); 349 ASSERT(!(args->flags & ATTR_KERNOVAL));
350 ASSERT(args->rmtvaluelen == args->valuelen);
350 351
352 valuelen = args->rmtvaluelen;
351 while (valuelen > 0) { 353 while (valuelen > 0) {
352 nmap = ATTR_RMTVALUE_MAPSIZE; 354 nmap = ATTR_RMTVALUE_MAPSIZE;
353 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, 355 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
@@ -415,7 +417,7 @@ xfs_attr_rmtval_set(
415 * attributes have headers, we can't just do a straight byte to FSB 417 * attributes have headers, we can't just do a straight byte to FSB
416 * conversion and have to take the header space into account. 418 * conversion and have to take the header space into account.
417 */ 419 */
418 blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); 420 blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
419 error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, 421 error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
420 XFS_ATTR_FORK); 422 XFS_ATTR_FORK);
421 if (error) 423 if (error)
@@ -480,7 +482,7 @@ xfs_attr_rmtval_set(
480 */ 482 */
481 lblkno = args->rmtblkno; 483 lblkno = args->rmtblkno;
482 blkcnt = args->rmtblkcnt; 484 blkcnt = args->rmtblkcnt;
483 valuelen = args->valuelen; 485 valuelen = args->rmtvaluelen;
484 while (valuelen > 0) { 486 while (valuelen > 0) {
485 struct xfs_buf *bp; 487 struct xfs_buf *bp;
486 xfs_daddr_t dblkno; 488 xfs_daddr_t dblkno;
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 6e95ea79f5d7..201c6091d26a 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -60,10 +60,12 @@ typedef struct xfs_da_args {
60 int index; /* index of attr of interest in blk */ 60 int index; /* index of attr of interest in blk */
61 xfs_dablk_t rmtblkno; /* remote attr value starting blkno */ 61 xfs_dablk_t rmtblkno; /* remote attr value starting blkno */
62 int rmtblkcnt; /* remote attr value block count */ 62 int rmtblkcnt; /* remote attr value block count */
63 int rmtvaluelen; /* remote attr value length in bytes */
63 xfs_dablk_t blkno2; /* blkno of 2nd attr leaf of interest */ 64 xfs_dablk_t blkno2; /* blkno of 2nd attr leaf of interest */
64 int index2; /* index of 2nd attr in blk */ 65 int index2; /* index of 2nd attr in blk */
65 xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */ 66 xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */
66 int rmtblkcnt2; /* remote attr value block count */ 67 int rmtblkcnt2; /* remote attr value block count */
68 int rmtvaluelen2; /* remote attr value length in bytes */
67 int op_flags; /* operation flags */ 69 int op_flags; /* operation flags */
68 enum xfs_dacmp cmpresult; /* name compare result for lookups */ 70 enum xfs_dacmp cmpresult; /* name compare result for lookups */
69} xfs_da_args_t; 71} xfs_da_args_t;
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 1399e187d425..753e467aa1a5 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata(
237 237
238 if (!lsn) 238 if (!lsn)
239 return 0; 239 return 0;
240 return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 240 return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
241} 241}
242 242
243const struct export_operations xfs_export_operations = { 243const struct export_operations xfs_export_operations = {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index b1c489c1fb2e..500c3f0656d0 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -155,7 +155,7 @@ xfs_dir_fsync(
155 155
156 if (!lsn) 156 if (!lsn)
157 return 0; 157 return 0;
158 return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 158 return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
159} 159}
160 160
161STATIC int 161STATIC int
@@ -288,7 +288,7 @@ xfs_file_read_iter(
288 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 288 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
289 289
290 if (inode->i_mapping->nrpages) { 290 if (inode->i_mapping->nrpages) {
291 ret = -filemap_write_and_wait_range( 291 ret = filemap_write_and_wait_range(
292 VFS_I(ip)->i_mapping, 292 VFS_I(ip)->i_mapping,
293 pos, -1); 293 pos, -1);
294 if (ret) { 294 if (ret) {
@@ -772,7 +772,7 @@ xfs_file_fallocate(
772 unsigned blksize_mask = (1 << inode->i_blkbits) - 1; 772 unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
773 773
774 if (offset & blksize_mask || len & blksize_mask) { 774 if (offset & blksize_mask || len & blksize_mask) {
775 error = -EINVAL; 775 error = EINVAL;
776 goto out_unlock; 776 goto out_unlock;
777 } 777 }
778 778
@@ -781,7 +781,7 @@ xfs_file_fallocate(
781 * in which case it is effectively a truncate operation 781 * in which case it is effectively a truncate operation
782 */ 782 */
783 if (offset + len >= i_size_read(inode)) { 783 if (offset + len >= i_size_read(inode)) {
784 error = -EINVAL; 784 error = EINVAL;
785 goto out_unlock; 785 goto out_unlock;
786 } 786 }
787 787
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ef1ca010f417..36d630319a27 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -72,8 +72,8 @@ xfs_initxattrs(
72 int error = 0; 72 int error = 0;
73 73
74 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 74 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
75 error = xfs_attr_set(ip, xattr->name, xattr->value, 75 error = -xfs_attr_set(ip, xattr->name, xattr->value,
76 xattr->value_len, ATTR_SECURE); 76 xattr->value_len, ATTR_SECURE);
77 if (error < 0) 77 if (error < 0)
78 break; 78 break;
79 } 79 }
@@ -93,8 +93,8 @@ xfs_init_security(
93 struct inode *dir, 93 struct inode *dir,
94 const struct qstr *qstr) 94 const struct qstr *qstr)
95{ 95{
96 return security_inode_init_security(inode, dir, qstr, 96 return -security_inode_init_security(inode, dir, qstr,
97 &xfs_initxattrs, NULL); 97 &xfs_initxattrs, NULL);
98} 98}
99 99
100static void 100static void
@@ -124,15 +124,15 @@ xfs_cleanup_inode(
124 xfs_dentry_to_name(&teardown, dentry, 0); 124 xfs_dentry_to_name(&teardown, dentry, 0);
125 125
126 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); 126 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
127 iput(inode);
128} 127}
129 128
130STATIC int 129STATIC int
131xfs_vn_mknod( 130xfs_generic_create(
132 struct inode *dir, 131 struct inode *dir,
133 struct dentry *dentry, 132 struct dentry *dentry,
134 umode_t mode, 133 umode_t mode,
135 dev_t rdev) 134 dev_t rdev,
135 bool tmpfile) /* unnamed file */
136{ 136{
137 struct inode *inode; 137 struct inode *inode;
138 struct xfs_inode *ip = NULL; 138 struct xfs_inode *ip = NULL;
@@ -156,8 +156,12 @@ xfs_vn_mknod(
156 if (error) 156 if (error)
157 return error; 157 return error;
158 158
159 xfs_dentry_to_name(&name, dentry, mode); 159 if (!tmpfile) {
160 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); 160 xfs_dentry_to_name(&name, dentry, mode);
161 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
162 } else {
163 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
164 }
161 if (unlikely(error)) 165 if (unlikely(error))
162 goto out_free_acl; 166 goto out_free_acl;
163 167
@@ -169,18 +173,22 @@ xfs_vn_mknod(
169 173
170#ifdef CONFIG_XFS_POSIX_ACL 174#ifdef CONFIG_XFS_POSIX_ACL
171 if (default_acl) { 175 if (default_acl) {
172 error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); 176 error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
173 if (error) 177 if (error)
174 goto out_cleanup_inode; 178 goto out_cleanup_inode;
175 } 179 }
176 if (acl) { 180 if (acl) {
177 error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); 181 error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
178 if (error) 182 if (error)
179 goto out_cleanup_inode; 183 goto out_cleanup_inode;
180 } 184 }
181#endif 185#endif
182 186
183 d_instantiate(dentry, inode); 187 if (tmpfile)
188 d_tmpfile(dentry, inode);
189 else
190 d_instantiate(dentry, inode);
191
184 out_free_acl: 192 out_free_acl:
185 if (default_acl) 193 if (default_acl)
186 posix_acl_release(default_acl); 194 posix_acl_release(default_acl);
@@ -189,11 +197,23 @@ xfs_vn_mknod(
189 return -error; 197 return -error;
190 198
191 out_cleanup_inode: 199 out_cleanup_inode:
192 xfs_cleanup_inode(dir, inode, dentry); 200 if (!tmpfile)
201 xfs_cleanup_inode(dir, inode, dentry);
202 iput(inode);
193 goto out_free_acl; 203 goto out_free_acl;
194} 204}
195 205
196STATIC int 206STATIC int
207xfs_vn_mknod(
208 struct inode *dir,
209 struct dentry *dentry,
210 umode_t mode,
211 dev_t rdev)
212{
213 return xfs_generic_create(dir, dentry, mode, rdev, false);
214}
215
216STATIC int
197xfs_vn_create( 217xfs_vn_create(
198 struct inode *dir, 218 struct inode *dir,
199 struct dentry *dentry, 219 struct dentry *dentry,
@@ -353,6 +373,7 @@ xfs_vn_symlink(
353 373
354 out_cleanup_inode: 374 out_cleanup_inode:
355 xfs_cleanup_inode(dir, inode, dentry); 375 xfs_cleanup_inode(dir, inode, dentry);
376 iput(inode);
356 out: 377 out:
357 return -error; 378 return -error;
358} 379}
@@ -1053,25 +1074,7 @@ xfs_vn_tmpfile(
1053 struct dentry *dentry, 1074 struct dentry *dentry,
1054 umode_t mode) 1075 umode_t mode)
1055{ 1076{
1056 int error; 1077 return xfs_generic_create(dir, dentry, mode, 0, true);
1057 struct xfs_inode *ip;
1058 struct inode *inode;
1059
1060 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
1061 if (unlikely(error))
1062 return -error;
1063
1064 inode = VFS_I(ip);
1065
1066 error = xfs_init_security(inode, dir, &dentry->d_name);
1067 if (unlikely(error)) {
1068 iput(inode);
1069 return -error;
1070 }
1071
1072 d_tmpfile(dentry, inode);
1073
1074 return 0;
1075} 1078}
1076 1079
1077static const struct inode_operations xfs_inode_operations = { 1080static const struct inode_operations xfs_inode_operations = {
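
The iops rework folds xfs_vn_mknod() and xfs_vn_tmpfile() onto one xfs_generic_create(), so O_TMPFILE creation now gets the security-init and ACL handling the named path already had; the tmpfile flag only selects xfs_create_tmpfile() over xfs_create() and d_tmpfile() over d_instantiate(), and skips the directory-entry cleanup on error, since a tmpfile never had one. The thin-wrapper shape, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static int generic_create(const char *name, bool tmpfile)
    {
        /* shared work: security labels, ACLs, error unwinding, ... */
        if (tmpfile)
            printf("unnamed file\n");       /* d_tmpfile() side */
        else
            printf("create %s\n", name);    /* d_instantiate() side */
        return 0;
    }

    static int vn_create(const char *name) { return generic_create(name, false); }
    static int vn_tmpfile(void)            { return generic_create(NULL, true); }

    int main(void)
    {
        return vn_create("file") | vn_tmpfile();
    }
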
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 08624dc67317..a5f8bd9899d3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -616,11 +616,13 @@ xfs_log_mount(
616 int error = 0; 616 int error = 0;
617 int min_logfsbs; 617 int min_logfsbs;
618 618
619 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) 619 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
620 xfs_notice(mp, "Mounting Filesystem"); 620 xfs_notice(mp, "Mounting V%d Filesystem",
621 else { 621 XFS_SB_VERSION_NUM(&mp->m_sb));
622 } else {
622 xfs_notice(mp, 623 xfs_notice(mp,
623"Mounting filesystem in no-recovery mode. Filesystem will be inconsistent."); 624"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
625 XFS_SB_VERSION_NUM(&mp->m_sb));
624 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 626 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
625 } 627 }
626 628
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 993cb19e7d39..944f3d9456a8 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -743,8 +743,6 @@ xfs_mountfs(
743 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; 743 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
744 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) 744 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
745 mp->m_inode_cluster_size = new_size; 745 mp->m_inode_cluster_size = new_size;
746 xfs_info(mp, "Using inode cluster size of %d bytes",
747 mp->m_inode_cluster_size);
748 } 746 }
749 747
750 /* 748 /*
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 348e4d2ed6e6..dc977b6e6a36 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -843,22 +843,17 @@ xfs_qm_init_quotainfo(
843 843
844 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); 844 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
845 845
846 if ((error = list_lru_init(&qinf->qi_lru))) { 846 error = -list_lru_init(&qinf->qi_lru);
847 kmem_free(qinf); 847 if (error)
848 mp->m_quotainfo = NULL; 848 goto out_free_qinf;
849 return error;
850 }
851 849
852 /* 850 /*
853 * See if quotainodes are setup, and if not, allocate them, 851 * See if quotainodes are setup, and if not, allocate them,
854 * and change the superblock accordingly. 852 * and change the superblock accordingly.
855 */ 853 */
856 if ((error = xfs_qm_init_quotainos(mp))) { 854 error = xfs_qm_init_quotainos(mp);
857 list_lru_destroy(&qinf->qi_lru); 855 if (error)
858 kmem_free(qinf); 856 goto out_free_lru;
859 mp->m_quotainfo = NULL;
860 return error;
861 }
862 857
863 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS); 858 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
864 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS); 859 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
@@ -918,7 +913,7 @@ xfs_qm_init_quotainfo(
918 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); 913 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
919 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); 914 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
920 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); 915 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
921 916
922 xfs_qm_dqdestroy(dqp); 917 xfs_qm_dqdestroy(dqp);
923 } else { 918 } else {
924 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; 919 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -935,6 +930,13 @@ xfs_qm_init_quotainfo(
935 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; 930 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
936 register_shrinker(&qinf->qi_shrinker); 931 register_shrinker(&qinf->qi_shrinker);
937 return 0; 932 return 0;
933
934out_free_lru:
935 list_lru_destroy(&qinf->qi_lru);
936out_free_qinf:
937 kmem_free(qinf);
938 mp->m_quotainfo = NULL;
939 return error;
938} 940}
939 941
940 942
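
The quotainfo change converts nested inline cleanup into the kernel's standard goto unwind ladder: each failure jumps to a label that releases exactly what was set up before that point, in reverse order, so the cleanup lives once at the bottom of the function. In miniature:

    #include <stdlib.h>

    struct qinfo { char *lru, *inos; };

    static int qinfo_init(struct qinfo *q)
    {
        int error = -1;

        q->lru = malloc(32);
        if (!q->lru)
            goto out;
        q->inos = malloc(32);
        if (!q->inos)
            goto out_free_lru;
        return 0;               /* success: q owns both allocations */

    out_free_lru:               /* unwind in reverse order of setup */
        free(q->lru);
    out:
        q->lru = q->inos = NULL;
        return error;
    }

    int main(void)
    {
        struct qinfo q;

        if (qinfo_init(&q) == 0) {
            free(q.inos);
            free(q.lru);
        }
        return 0;
    }
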
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index 0c0e41bbe4e3..8baf61afae1d 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -201,10 +201,6 @@ xfs_mount_validate_sb(
201 * write validation, we don't need to check feature masks. 201 * write validation, we don't need to check feature masks.
202 */ 202 */
203 if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { 203 if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
204 xfs_alert(mp,
205"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
206"Use of these features in this kernel is at your own risk!");
207
208 if (xfs_sb_has_compat_feature(sbp, 204 if (xfs_sb_has_compat_feature(sbp,
209 XFS_SB_FEAT_COMPAT_UNKNOWN)) { 205 XFS_SB_FEAT_COMPAT_UNKNOWN)) {
210 xfs_warn(mp, 206 xfs_warn(mp,
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 205376776377..3494eff8e4eb 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1433,11 +1433,11 @@ xfs_fs_fill_super(
1433 if (error) 1433 if (error)
1434 goto out_free_fsname; 1434 goto out_free_fsname;
1435 1435
1436 error = xfs_init_mount_workqueues(mp); 1436 error = -xfs_init_mount_workqueues(mp);
1437 if (error) 1437 if (error)
1438 goto out_close_devices; 1438 goto out_close_devices;
1439 1439
1440 error = xfs_icsb_init_counters(mp); 1440 error = -xfs_icsb_init_counters(mp);
1441 if (error) 1441 if (error)
1442 goto out_destroy_workqueues; 1442 goto out_destroy_workqueues;
1443 1443