Diffstat (limited to 'fs')
 fs/Kconfig                      |   4
 fs/bio.c                        |  28
 fs/block_dev.c                  |   2
 fs/btrfs/acl.c                  |   6
 fs/btrfs/async-thread.c         |  81
 fs/btrfs/async-thread.h         |  10
 fs/btrfs/btrfs_inode.h          |  18
 fs/btrfs/ctree.h                |  19
 fs/btrfs/disk-io.c              |  50
 fs/btrfs/extent-tree.c          | 252
 fs/btrfs/extent_io.c            |  42
 fs/btrfs/extent_io.h            |  18
 fs/btrfs/file.c                 |  44
 fs/btrfs/inode.c                | 144
 fs/btrfs/ioctl.c                |   7
 fs/btrfs/ordered-data.c         |   6
 fs/btrfs/relocation.c           |   4
 fs/btrfs/super.c                |   9
 fs/btrfs/transaction.c          |  45
 fs/btrfs/transaction.h          |   5
 fs/btrfs/tree-log.c             |  56
 fs/btrfs/tree-log.h             |   3
 fs/btrfs/xattr.c                |   2
 fs/cifs/connect.c               |   3
 fs/dlm/lowcomms.c               |  36
 fs/ecryptfs/Kconfig             |   3
 fs/ecryptfs/main.c              |   7
 fs/ext3/super.c                 |  13
 fs/file.c                       |   1
 fs/hfs/btree.c                  |   5
 fs/hfsplus/wrapper.c            |   4
 fs/nfs/client.c                 |   2
 fs/nfs/dir.c                    |   2
 fs/nfs/direct.c                 |   1
 fs/nfs/nfs4namespace.c          |  12
 fs/nfs/nfs4proc.c               |  15
 fs/nfs/nfs4renewd.c             |   6
 fs/nfs/nfs4xdr.c                |   1
 fs/nfs/super.c                  |  37
 fs/notify/dnotify/dnotify.c     |   3
 fs/notify/inode_mark.c          |   6
 fs/notify/notification.c        |   2
 fs/pipe.c                       |  41
 fs/proc/kcore.c                 |   1
 fs/proc/meminfo.c               |   2
 fs/proc/page.c                  |   5
 fs/romfs/storage.c              |   4
 fs/sysfs/dir.c                  |   3
 fs/sysfs/file.c                 |  14
 fs/xfs/linux-2.6/xfs_aops.c     |  38
 fs/xfs/linux-2.6/xfs_file.c     |   9
 fs/xfs/linux-2.6/xfs_iops.c     |  41
 fs/xfs/linux-2.6/xfs_lrw.c      |   2
 fs/xfs/linux-2.6/xfs_quotaops.c |   2
 fs/xfs/linux-2.6/xfs_super.c    |  59
 fs/xfs/linux-2.6/xfs_sync.c     |  36
 fs/xfs/quota/xfs_qm_syscalls.c  |   1
 fs/xfs/xfs_dfrag.c              |   8
 fs/xfs/xfs_dir2_leaf.c          |   4
 fs/xfs/xfs_ialloc.c             |   1
 fs/xfs/xfs_inode.c              |   4
 fs/xfs/xfs_inode.h              |   2
 fs/xfs/xfs_inode_item.c         |  18
 fs/xfs/xfs_itable.c             |  21
 fs/xfs/xfs_vnodeops.c           |   6
 65 files changed, 933 insertions(+), 403 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index d4bf8caad8d0..2126078a38ed 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -135,8 +135,8 @@ config TMPFS_POSIX_ACL
 
 config HUGETLBFS
 	bool "HugeTLB file system support"
-	depends on X86 || IA64 || PPC64 || SPARC64 || (SUPERH && MMU) || \
-		   (S390 && 64BIT) || SYS_SUPPORTS_HUGETLBFS || BROKEN
+	depends on X86 || IA64 || PPC_BOOK3S_64 || SPARC64 || (S390 && 64BIT) || \
+		   SYS_SUPPORTS_HUGETLBFS || BROKEN
 	help
 	  hugetlbfs is a filesystem backing for HugeTLB pages, based on
 	  ramfs. For architectures that support it, say Y here and read
diff --git a/fs/bio.c b/fs/bio.c
index 402cb84a92a1..12da5db8682c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -325,8 +325,16 @@ static void bio_fs_destructor(struct bio *bio)
  * @gfp_mask: allocation mask to use
  * @nr_iovecs: number of iovecs
  *
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
  *
  * RETURNS:
  * Pointer to new bio on success, NULL on failure.
@@ -350,21 +358,13 @@ static void bio_kmalloc_destructor(struct bio *bio)
 }
 
 /**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
  * @gfp_mask: the GFP_ mask given to the slab allocator
  * @nr_iovecs: number of iovecs to pre-allocate
  *
  * Description:
- *   bio_alloc will allocate a bio and associated bio_vec array that can hold
- *   at least @nr_iovecs entries. Allocations will be done from the
- *   fs_bio_set. Also see @bio_alloc_bioset.
- *
- *   If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- *   a bio. This is due to the mempool guarantees. To make this work, callers
- *   must never allocate more than 1 bio at a time from this pool. Callers
- *   that need to allocate more than 1 bio must always submit the previously
- *   allocated bio for IO before attempting to allocate a new one. Failure to
- *   do so can cause livelocks under memory pressure.
+ *   Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
+ *   %__GFP_WAIT, the allocation is guaranteed to succeed.
  *
  **/
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(zero_fill_bio);
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc or bio_get. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
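
The rewritten bio_alloc() kerneldoc above encodes a real contract: with %__GFP_WAIT set the allocation cannot fail, but only because it may sleep on the fs_bio_set mempool, and that guarantee collapses if one caller holds two bios from the pool at once. A minimal sketch of a conforming caller for this era's block API (the helper and its single-page payload are illustrative, not part of the patch):

#include <linux/bio.h>

static void write_pages_one_bio_at_a_time(struct block_device *bdev,
					  struct page *page, sector_t sector,
					  bio_end_io_t *done, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* cannot return NULL: GFP_NOFS contains __GFP_WAIT, so
		 * this sleeps on the mempool instead of failing */
		struct bio *bio = bio_alloc(GFP_NOFS, 1);

		bio->bi_bdev = bdev;
		bio->bi_sector = sector + i * (PAGE_SIZE >> 9);
		bio_add_page(bio, page, PAGE_SIZE, 0);
		bio->bi_end_io = done;

		/* submit before the next bio_alloc(); holding two bios
		 * from fs_bio_set can livelock under memory pressure */
		submit_bio(WRITE, bio);
	}
}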
diff --git a/fs/block_dev.c b/fs/block_dev.c
index dde91e7e1c3a..73d6a735b8f3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1258,8 +1258,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
 		}
 	} else {
-		put_disk(disk);
 		module_put(disk->fops->owner);
+		put_disk(disk);
 		disk = NULL;
 		if (bdev->bd_contains == bdev) {
 			if (bdev->bd_disk->fops->open) {
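
The two-line swap above is an ordering fix, not a cleanup: put_disk() can drop the last reference and free the gendisk, after which reading disk->fops->owner is a use-after-free. The hazard and the fix, schematically:

	/* old order: put_disk() may free 'disk'... */
	put_disk(disk);
	module_put(disk->fops->owner);	/* ...so this touches freed memory */

	/* fixed order: read fops->owner while 'disk' is still valid */
	module_put(disk->fops->owner);
	put_disk(disk);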
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 69b355ae7f49..361604244271 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -27,7 +27,7 @@
 #include "btrfs_inode.h"
 #include "xattr.h"
 
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 
 static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 {
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
 	.set	= btrfs_xattr_acl_access_set,
 };
 
-#else /* CONFIG_BTRFS_POSIX_ACL */
+#else /* CONFIG_BTRFS_FS_POSIX_ACL */
 
 int btrfs_acl_chmod(struct inode *inode)
 {
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
 	return 0;
 }
 
-#endif /* CONFIG_BTRFS_POSIX_ACL */
+#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 282ca085c2fb..c0861e781cdb 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,51 @@ struct btrfs_worker_thread {
 };
 
 /*
+ * btrfs_start_workers uses kthread_run, which can block waiting for memory
+ * for a very long time. It will actually throttle on page writeback,
+ * and so it may not make progress until after our btrfs worker threads
+ * process all of the pending work structs in their queue
+ *
+ * This means we can't use btrfs_start_workers from inside a btrfs worker
+ * thread that is used as part of cleaning dirty memory, which pretty much
+ * involves all of the worker threads.
+ *
+ * Instead we have a helper queue who never has more than one thread
+ * where we scheduler thread start operations. This worker_start struct
+ * is used to contain the work and hold a pointer to the queue that needs
+ * another worker.
+ */
+struct worker_start {
+	struct btrfs_work work;
+	struct btrfs_workers *queue;
+};
+
+static void start_new_worker_func(struct btrfs_work *work)
+{
+	struct worker_start *start;
+	start = container_of(work, struct worker_start, work);
+	btrfs_start_workers(start->queue, 1);
+	kfree(start);
+}
+
+static int start_new_worker(struct btrfs_workers *queue)
+{
+	struct worker_start *start;
+	int ret;
+
+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return -ENOMEM;
+
+	start->work.func = start_new_worker_func;
+	start->queue = queue;
+	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
+	if (ret)
+		kfree(start);
+	return ret;
+}
+
+/*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
  */
@@ -118,11 +163,13 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 		goto out;
 
 	workers->atomic_start_pending = 0;
-	if (workers->num_workers >= workers->max_workers)
+	if (workers->num_workers + workers->num_workers_starting >=
+	    workers->max_workers)
 		goto out;
 
+	workers->num_workers_starting += 1;
 	spin_unlock_irqrestore(&workers->lock, flags);
-	btrfs_start_workers(workers, 1);
+	start_new_worker(workers);
 	return;
 
 out:
@@ -390,9 +437,11 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 /*
  * simple init on struct btrfs_workers
  */
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+			struct btrfs_workers *async_helper)
 {
 	workers->num_workers = 0;
+	workers->num_workers_starting = 0;
 	INIT_LIST_HEAD(&workers->worker_list);
 	INIT_LIST_HEAD(&workers->idle_list);
 	INIT_LIST_HEAD(&workers->order_list);
@@ -404,14 +453,15 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
 	workers->name = name;
 	workers->ordered = 0;
 	workers->atomic_start_pending = 0;
-	workers->atomic_worker_start = 0;
+	workers->atomic_worker_start = async_helper;
 }
 
 /*
  * starts new worker threads. This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers,
+				 int num_workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
@@ -444,6 +494,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 		list_add_tail(&worker->worker_list, &workers->idle_list);
 		worker->idle = 1;
 		workers->num_workers++;
+		workers->num_workers_starting--;
+		WARN_ON(workers->num_workers_starting < 0);
 		spin_unlock_irq(&workers->lock);
 	}
 	return 0;
@@ -452,6 +504,14 @@ fail:
 	return ret;
 }
 
+int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+{
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting += num_workers;
+	spin_unlock_irq(&workers->lock);
+	return __btrfs_start_workers(workers, num_workers);
+}
+
 /*
  * run through the list and find a worker thread that doesn't have a lot
  * to do right now. This can return null if we aren't yet at the thread
@@ -461,7 +521,10 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	struct list_head *next;
-	int enforce_min = workers->num_workers < workers->max_workers;
+	int enforce_min;
+
+	enforce_min = (workers->num_workers + workers->num_workers_starting) <
+		workers->max_workers;
 
 	/*
 	 * if we find an idle thread, don't move it to the end of the
@@ -509,15 +572,17 @@ again:
 	worker = next_worker(workers);
 
 	if (!worker) {
-		if (workers->num_workers >= workers->max_workers) {
+		if (workers->num_workers + workers->num_workers_starting >=
+		    workers->max_workers) {
 			goto fallback;
 		} else if (workers->atomic_worker_start) {
 			workers->atomic_start_pending = 1;
 			goto fallback;
 		} else {
+			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			btrfs_start_workers(workers, 1);
+			__btrfs_start_workers(workers, 1);
 			goto again;
 		}
 	}
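
The worker_start machinery added above exists because kthread_run() can block on memory for a long time, so pools whose work is queued from atomic context must delegate thread creation to a dedicated one-thread helper. A condensed sketch of the wiring, mirroring the disk-io.c hunks below (the setup function itself is hypothetical):

static void wire_up_pools(struct btrfs_fs_info *fs_info, int pool_size)
{
	/* the helper pool: one thread, and no async starter of its own */
	btrfs_init_workers(&fs_info->generic_worker, "genwork", 1, NULL);

	/* endio work is queued at interrupt time; pointing the pool at
	 * the helper makes growth go through start_new_worker() */
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   pool_size, &fs_info->generic_worker);

	btrfs_start_workers(&fs_info->generic_worker, 1);
	btrfs_start_workers(&fs_info->endio_workers, 1);
}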
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index fc089b95ec14..5077746cf85e 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -64,6 +64,8 @@ struct btrfs_workers {
 	/* current number of running workers */
 	int num_workers;
 
+	int num_workers_starting;
+
 	/* max number of workers allowed. changed by btrfs_start_workers */
 	int max_workers;
 
@@ -78,9 +80,10 @@ struct btrfs_workers {
 
 	/*
 	 * are we allowed to sleep while starting workers or are we required
-	 * to start them at a later time?
+	 * to start them at a later time? If we can't sleep, this indicates
+	 * which queue we need to use to schedule thread creation.
 	 */
-	int atomic_worker_start;
+	struct btrfs_workers *atomic_worker_start;
 
 	/* list with all the work threads. The workers on the idle thread
 	 * may be actively servicing jobs, but they haven't yet hit the
@@ -109,7 +112,8 @@ struct btrfs_workers {
 int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
 int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+			struct btrfs_workers *async_starter);
 int btrfs_requeue_work(struct btrfs_work *work);
 void btrfs_set_work_high_prio(struct btrfs_work *work);
 #endif
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index a54d354cefcb..f6783a42f010 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -86,6 +86,12 @@ struct btrfs_inode {
 	 * transid of the trans_handle that last modified this inode
 	 */
 	u64 last_trans;
+
+	/*
+	 * log transid when this inode was last modified
+	 */
+	u64 last_sub_trans;
+
 	/*
 	 * transid that last logged this inode
 	 */
@@ -128,12 +134,14 @@ struct btrfs_inode {
 	u64 last_unlink_trans;
 
 	/*
-	 * These two counters are for delalloc metadata reservations. We keep
-	 * track of how many extents we've accounted for vs how many extents we
-	 * have.
+	 * Counters to keep track of the number of extent item's we may use due
+	 * to delalloc and such. outstanding_extents is the number of extent
+	 * items we think we'll end up using, and reserved_extents is the number
+	 * of extent items we've reserved metadata for.
 	 */
-	int delalloc_reserved_extents;
-	int delalloc_extents;
+	spinlock_t accounting_lock;
+	int reserved_extents;
+	int outstanding_extents;
 
 	/*
 	 * ordered_data_close is set by truncate when a file that used
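
The comment above distinguishes the two counters: outstanding_extents is what delalloc will probably consume, reserved_extents is what metadata has actually been set aside for, and accounting_lock keeps the comparison coherent. A minimal sketch of the check this enables (simplified from the extent-tree.c hunk further down, which additionally holds the space-info lock):

static int over_reserved(struct inode *inode)
{
	struct btrfs_inode *bi = BTRFS_I(inode);
	int over;

	spin_lock(&bi->accounting_lock);
	over = bi->reserved_extents > bi->outstanding_extents;
	spin_unlock(&bi->accounting_lock);
	return over;	/* only then may a reservation be given back */
}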
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index dd8ced9814c4..444b3e9b92a4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -691,14 +691,17 @@ struct btrfs_space_info {
 
 	struct list_head list;
 
+	/* for controlling how we free up space for allocations */
+	wait_queue_head_t allocate_wait;
+	wait_queue_head_t flush_wait;
+	int allocating_chunk;
+	int flushing;
+
 	/* for block groups in our same type */
 	struct list_head block_groups;
 	spinlock_t lock;
 	struct rw_semaphore groups_sem;
 	atomic_t caching_threads;
-
-	int allocating_chunk;
-	wait_queue_head_t wait;
 };
 
 /*
@@ -907,6 +910,7 @@ struct btrfs_fs_info {
 	 * A third pool does submit_bio to avoid deadlocking with the other
 	 * two
 	 */
+	struct btrfs_workers generic_worker;
 	struct btrfs_workers workers;
 	struct btrfs_workers delalloc_workers;
 	struct btrfs_workers endio_workers;
@@ -914,6 +918,7 @@ struct btrfs_fs_info {
 	struct btrfs_workers endio_meta_write_workers;
 	struct btrfs_workers endio_write_workers;
 	struct btrfs_workers submit_workers;
+	struct btrfs_workers enospc_workers;
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
 	 * the cow mechanism and make them safe to write. It happens
@@ -1004,7 +1009,10 @@ struct btrfs_root {
 	atomic_t log_writers;
 	atomic_t log_commit[2];
 	unsigned long log_transid;
+	unsigned long last_log_commit;
 	unsigned long log_batch;
+	pid_t log_start_pid;
+	bool log_multiple_pids;
 
 	u64 objectid;
 	u64 last_trans;
@@ -1145,6 +1153,7 @@ struct btrfs_root {
 #define BTRFS_MOUNT_FLUSHONCOMMIT	(1 << 7)
 #define BTRFS_MOUNT_SSD_SPREAD		(1 << 8)
 #define BTRFS_MOUNT_NOSSD		(1 << 9)
+#define BTRFS_MOUNT_DISCARD		(1 << 10)
 
 #define btrfs_clear_opt(o, opt)	((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)	((o) |= BTRFS_MOUNT_##opt)
@@ -2323,7 +2332,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
 void btrfs_orphan_cleanup(struct btrfs_root *root);
 int btrfs_cont_expand(struct inode *inode, loff_t size);
 int btrfs_invalidate_inodes(struct btrfs_root *root);
-extern struct dentry_operations btrfs_dentry_operations;
+extern const struct dentry_operations btrfs_dentry_operations;
 
 /* ioctl.c */
 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -2366,7 +2375,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
 
 /* acl.c */
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 int btrfs_check_acl(struct inode *inode, int mask);
 #else
 #define btrfs_check_acl NULL
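
The new BTRFS_MOUNT_DISCARD bit slots into the existing option macros defined above, so discard support becomes a per-mount decision. A sketch of both sides of the interface, following those macros (the parse-time line stands in for what super.c's option parser would do):

	/* mount -o discard: set the bit at parse time */
	btrfs_set_opt(info->mount_opt, DISCARD);

	/* in the extent-freeing path: bail out unless enabled */
	if (!btrfs_test_opt(root, DISCARD))
		return 0;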
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index af0435f79fa6..02b6afbd7450 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -917,6 +917,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	atomic_set(&root->log_writers, 0);
 	root->log_batch = 0;
 	root->log_transid = 0;
+	root->last_log_commit = 0;
 	extent_io_tree_init(&root->dirty_log_pages,
 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
 
@@ -1087,6 +1088,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 	WARN_ON(root->log_root);
 	root->log_root = log_root;
 	root->log_transid = 0;
+	root->last_log_commit = 0;
 	return 0;
 }
 
@@ -1746,21 +1748,25 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 		err = -EINVAL;
 		goto fail_iput;
 	}
-printk("thread pool is %d\n", fs_info->thread_pool_size);
-	/*
-	 * we need to start all the end_io workers up front because the
-	 * queue work function gets called at interrupt time, and so it
-	 * cannot dynamically grow.
-	 */
+
+	btrfs_init_workers(&fs_info->generic_worker,
+			   "genwork", 1, NULL);
+
 	btrfs_init_workers(&fs_info->workers, "worker",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	btrfs_init_workers(&fs_info->submit_workers, "submit",
 			   min_t(u64, fs_devices->num_devices,
-			   fs_info->thread_pool_size));
+			   fs_info->thread_pool_size),
+			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->enospc_workers, "enospc",
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	/* a higher idle thresh on the submit workers makes it much more
 	 * likely that bios will be send down in a sane order to the
@@ -1774,15 +1780,20 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
 	fs_info->delalloc_workers.idle_thresh = 2;
 	fs_info->delalloc_workers.ordered = 1;
 
-	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
+	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_workers, "endio",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_meta_write_workers,
-			   "endio-meta-write", fs_info->thread_pool_size);
+			   "endio-meta-write", fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	/*
 	 * endios are largely parallel and should have a very
@@ -1794,12 +1805,8 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
 	fs_info->endio_write_workers.idle_thresh = 2;
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
 
-	fs_info->endio_workers.atomic_worker_start = 1;
-	fs_info->endio_meta_workers.atomic_worker_start = 1;
-	fs_info->endio_write_workers.atomic_worker_start = 1;
-	fs_info->endio_meta_write_workers.atomic_worker_start = 1;
-
 	btrfs_start_workers(&fs_info->workers, 1);
+	btrfs_start_workers(&fs_info->generic_worker, 1);
 	btrfs_start_workers(&fs_info->submit_workers, 1);
 	btrfs_start_workers(&fs_info->delalloc_workers, 1);
 	btrfs_start_workers(&fs_info->fixup_workers, 1);
@@ -1807,6 +1814,7 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
 	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
 	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
 	btrfs_start_workers(&fs_info->endio_write_workers, 1);
+	btrfs_start_workers(&fs_info->enospc_workers, 1);
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2012,6 +2020,7 @@ fail_chunk_root:
 	free_extent_buffer(chunk_root->node);
 	free_extent_buffer(chunk_root->commit_root);
 fail_sb_buffer:
+	btrfs_stop_workers(&fs_info->generic_worker);
 	btrfs_stop_workers(&fs_info->fixup_workers);
 	btrfs_stop_workers(&fs_info->delalloc_workers);
 	btrfs_stop_workers(&fs_info->workers);
@@ -2020,6 +2029,7 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->submit_workers);
+	btrfs_stop_workers(&fs_info->enospc_workers);
 fail_iput:
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
@@ -2437,6 +2447,7 @@ int close_ctree(struct btrfs_root *root)
 
 	iput(fs_info->btree_inode);
 
+	btrfs_stop_workers(&fs_info->generic_worker);
 	btrfs_stop_workers(&fs_info->fixup_workers);
 	btrfs_stop_workers(&fs_info->delalloc_workers);
 	btrfs_stop_workers(&fs_info->workers);
@@ -2445,6 +2456,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->submit_workers);
+	btrfs_stop_workers(&fs_info->enospc_workers);
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 359a754c782c..e238a0cdac67 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1568,23 +1568,23 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-#ifdef BIO_RW_DISCARD
 static void btrfs_issue_discard(struct block_device *bdev,
 				u64 start, u64 len)
 {
 	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
 			DISCARD_FL_BARRIER);
 }
-#endif
 
 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 				u64 num_bytes)
 {
-#ifdef BIO_RW_DISCARD
 	int ret;
 	u64 map_length = num_bytes;
 	struct btrfs_multi_bio *multi = NULL;
 
+	if (!btrfs_test_opt(root, DISCARD))
+		return 0;
+
 	/* Tell the block device(s) that the sectors can be discarded */
 	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
 			      bytenr, &map_length, &multi, 0);
@@ -1604,9 +1604,6 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 	}
 
 	return ret;
-#else
-	return 0;
-#endif
 }
 
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
@@ -2824,14 +2821,17 @@ int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
 					   num_items);
 
 	spin_lock(&meta_sinfo->lock);
-	if (BTRFS_I(inode)->delalloc_reserved_extents <=
-	    BTRFS_I(inode)->delalloc_extents) {
+	spin_lock(&BTRFS_I(inode)->accounting_lock);
+	if (BTRFS_I(inode)->reserved_extents <=
+	    BTRFS_I(inode)->outstanding_extents) {
+		spin_unlock(&BTRFS_I(inode)->accounting_lock);
 		spin_unlock(&meta_sinfo->lock);
 		return 0;
 	}
+	spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
-	BTRFS_I(inode)->delalloc_reserved_extents--;
-	BUG_ON(BTRFS_I(inode)->delalloc_reserved_extents < 0);
+	BTRFS_I(inode)->reserved_extents--;
+	BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
 
 	if (meta_sinfo->bytes_delalloc < num_bytes) {
 		bug = true;
@@ -2864,6 +2864,107 @@ static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
 		meta_sinfo->force_delalloc = 0;
 }
 
+struct async_flush {
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+	struct btrfs_work work;
+};
+
+static noinline void flush_delalloc_async(struct btrfs_work *work)
+{
+	struct async_flush *async;
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+
+	async = container_of(work, struct async_flush, work);
+	root = async->root;
+	info = async->info;
+
+	btrfs_start_delalloc_inodes(root);
+	wake_up(&info->flush_wait);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+
+	kfree(async);
+}
+
+static void wait_on_flush(struct btrfs_space_info *info)
+{
+	DEFINE_WAIT(wait);
+	u64 used;
+
+	while (1) {
+		prepare_to_wait(&info->flush_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+		spin_lock(&info->lock);
+		if (!info->flushing) {
+			spin_unlock(&info->lock);
+			break;
+		}
+
+		used = info->bytes_used + info->bytes_reserved +
+			info->bytes_pinned + info->bytes_readonly +
+			info->bytes_super + info->bytes_root +
+			info->bytes_may_use + info->bytes_delalloc;
+		if (used < info->total_bytes) {
+			spin_unlock(&info->lock);
+			break;
+		}
+		spin_unlock(&info->lock);
+		schedule();
+	}
+	finish_wait(&info->flush_wait, &wait);
+}
+
+static void flush_delalloc(struct btrfs_root *root,
+			   struct btrfs_space_info *info)
+{
+	struct async_flush *async;
+	bool wait = false;
+
+	spin_lock(&info->lock);
+
+	if (!info->flushing) {
+		info->flushing = 1;
+		init_waitqueue_head(&info->flush_wait);
+	} else {
+		wait = true;
+	}
+
+	spin_unlock(&info->lock);
+
+	if (wait) {
+		wait_on_flush(info);
+		return;
+	}
+
+	async = kzalloc(sizeof(*async), GFP_NOFS);
+	if (!async)
+		goto flush;
+
+	async->root = root;
+	async->info = info;
+	async->work.func = flush_delalloc_async;
+
+	btrfs_queue_worker(&root->fs_info->enospc_workers,
+			   &async->work);
+	wait_on_flush(info);
+	return;
+
+flush:
+	btrfs_start_delalloc_inodes(root);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+}
+
 static int maybe_allocate_chunk(struct btrfs_root *root,
 				struct btrfs_space_info *info)
 {
@@ -2894,7 +2995,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	if (!info->allocating_chunk) {
 		info->force_alloc = 1;
 		info->allocating_chunk = 1;
-		init_waitqueue_head(&info->wait);
+		init_waitqueue_head(&info->allocate_wait);
 	} else {
 		wait = true;
 	}
@@ -2902,7 +3003,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	spin_unlock(&info->lock);
 
 	if (wait) {
-		wait_event(info->wait,
+		wait_event(info->allocate_wait,
 			   !info->allocating_chunk);
 		return 1;
 	}
@@ -2923,7 +3024,7 @@ out:
 	spin_lock(&info->lock);
 	info->allocating_chunk = 0;
 	spin_unlock(&info->lock);
-	wake_up(&info->wait);
+	wake_up(&info->allocate_wait);
 
 	if (ret)
 		return 0;
@@ -2981,21 +3082,20 @@ again:
 			filemap_flush(inode->i_mapping);
 			goto again;
 		} else if (flushed == 3) {
-			btrfs_start_delalloc_inodes(root);
-			btrfs_wait_ordered_extents(root, 0);
+			flush_delalloc(root, meta_sinfo);
 			goto again;
 		}
 		spin_lock(&meta_sinfo->lock);
 		meta_sinfo->bytes_delalloc -= num_bytes;
 		spin_unlock(&meta_sinfo->lock);
 		printk(KERN_ERR "enospc, has %d, reserved %d\n",
-		       BTRFS_I(inode)->delalloc_extents,
-		       BTRFS_I(inode)->delalloc_reserved_extents);
+		       BTRFS_I(inode)->outstanding_extents,
+		       BTRFS_I(inode)->reserved_extents);
 		dump_space_info(meta_sinfo, 0, 0);
 		return -ENOSPC;
 	}
 
-	BTRFS_I(inode)->delalloc_reserved_extents++;
+	BTRFS_I(inode)->reserved_extents++;
 	check_force_delalloc(meta_sinfo);
 	spin_unlock(&meta_sinfo->lock);
 
@@ -3094,8 +3194,7 @@ again:
 	}
 
 	if (retries == 2) {
-		btrfs_start_delalloc_inodes(root);
-		btrfs_wait_ordered_extents(root, 0);
+		flush_delalloc(root, meta_sinfo);
 		goto again;
 	}
 	spin_lock(&meta_sinfo->lock);
@@ -3588,6 +3687,14 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
 	if (is_data)
 		goto pinit;
 
+	/*
+	 * discard is sloooow, and so triggering discards on
+	 * individual btree blocks isn't a good plan.  Just
+	 * pin everything in discard mode.
+	 */
+	if (btrfs_test_opt(root, DISCARD))
+		goto pinit;
+
 	buf = btrfs_find_tree_block(root, bytenr, num_bytes);
 	if (!buf)
 		goto pinit;
@@ -4029,6 +4136,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	int loop = 0;
 	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
+	bool failed_alloc = false;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4233,14 +4341,23 @@ refill_cluster:
 
 		offset = btrfs_find_space_for_alloc(block_group, search_start,
 						    num_bytes, empty_size);
-		if (!offset && (cached || (!cached &&
-					   loop == LOOP_CACHING_NOWAIT))) {
-			goto loop;
-		} else if (!offset && (!cached &&
-				       loop > LOOP_CACHING_NOWAIT)) {
+		/*
+		 * If we didn't find a chunk, and we haven't failed on this
+		 * block group before, and this block group is in the middle of
+		 * caching and we are ok with waiting, then go ahead and wait
+		 * for progress to be made, and set failed_alloc to true.
+		 *
+		 * If failed_alloc is true then we've already waited on this
+		 * block group once and should move on to the next block group.
+		 */
+		if (!offset && !failed_alloc && !cached &&
+		    loop > LOOP_CACHING_NOWAIT) {
 			wait_block_group_cache_progress(block_group,
 					num_bytes + empty_size);
+			failed_alloc = true;
 			goto have_block_group;
+		} else if (!offset) {
+			goto loop;
 		}
 checks:
 		search_start = stripe_align(root, offset);
@@ -4288,6 +4405,7 @@ checks:
 			break;
 loop:
 		failed_cluster_refill = false;
+		failed_alloc = false;
 		btrfs_put_block_group(block_group);
 	}
 	up_read(&space_info->groups_sem);
@@ -4799,6 +4917,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 	u64 bytenr;
 	u64 generation;
 	u64 refs;
+	u64 flags;
 	u64 last = 0;
 	u32 nritems;
 	u32 blocksize;
@@ -4836,15 +4955,19 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 		    generation <= root->root_key.offset)
 			continue;
 
+		/* We don't lock the tree block, it's OK to be racy here */
+		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
+					       &refs, &flags);
+		BUG_ON(ret);
+		BUG_ON(refs == 0);
+
 		if (wc->stage == DROP_REFERENCE) {
-			ret = btrfs_lookup_extent_info(trans, root,
-						       bytenr, blocksize,
-						       &refs, NULL);
-			BUG_ON(ret);
-			BUG_ON(refs == 0);
 			if (refs == 1)
 				goto reada;
 
+			if (wc->level == 1 &&
+			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+				continue;
 			if (!wc->update_ref ||
 			    generation <= root->root_key.offset)
 				continue;
@@ -4853,6 +4976,10 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 					       &wc->update_progress);
 			if (ret < 0)
 				continue;
+		} else {
+			if (wc->level == 1 &&
+			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+				continue;
 		}
 reada:
 		ret = readahead_tree_block(root, bytenr, blocksize,
@@ -4876,7 +5003,7 @@ reada:
 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
 				   struct btrfs_path *path,
-				   struct walk_control *wc)
+				   struct walk_control *wc, int lookup_info)
 {
 	int level = wc->level;
 	struct extent_buffer *eb = path->nodes[level];
@@ -4891,8 +5018,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 	 * when reference count of tree block is 1, it won't increase
 	 * again. once full backref flag is set, we never clear it.
 	 */
-	if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
-	    (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
+	if (lookup_info &&
+	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
 		BUG_ON(!path->locks[level]);
 		ret = btrfs_lookup_extent_info(trans, root,
 					       eb->start, eb->len,
@@ -4953,7 +5081,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct btrfs_path *path,
-				 struct walk_control *wc)
+				 struct walk_control *wc, int *lookup_info)
 {
 	u64 bytenr;
 	u64 generation;
@@ -4973,8 +5101,10 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	 * for the subtree
 	 */
 	if (wc->stage == UPDATE_BACKREF &&
-	    generation <= root->root_key.offset)
+	    generation <= root->root_key.offset) {
+		*lookup_info = 1;
 		return 1;
+	}
 
 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
 	blocksize = btrfs_level_size(root, level - 1);
@@ -4987,14 +5117,19 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	btrfs_tree_lock(next);
 	btrfs_set_lock_blocking(next);
 
-	if (wc->stage == DROP_REFERENCE) {
-		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
-					       &wc->refs[level - 1],
-					       &wc->flags[level - 1]);
-		BUG_ON(ret);
-		BUG_ON(wc->refs[level - 1] == 0);
+	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
+				       &wc->refs[level - 1],
+				       &wc->flags[level - 1]);
+	BUG_ON(ret);
+	BUG_ON(wc->refs[level - 1] == 0);
+	*lookup_info = 0;
 
+	if (wc->stage == DROP_REFERENCE) {
 		if (wc->refs[level - 1] > 1) {
+			if (level == 1 &&
+			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+				goto skip;
+
 			if (!wc->update_ref ||
 			    generation <= root->root_key.offset)
 				goto skip;
@@ -5008,12 +5143,17 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 			wc->stage = UPDATE_BACKREF;
 			wc->shared_level = level - 1;
 		}
+	} else {
+		if (level == 1 &&
+		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+			goto skip;
 	}
 
 	if (!btrfs_buffer_uptodate(next, generation)) {
 		btrfs_tree_unlock(next);
 		free_extent_buffer(next);
 		next = NULL;
+		*lookup_info = 1;
 	}
 
 	if (!next) {
@@ -5036,21 +5176,22 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 skip:
 	wc->refs[level - 1] = 0;
 	wc->flags[level - 1] = 0;
+	if (wc->stage == DROP_REFERENCE) {
+		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
+			parent = path->nodes[level]->start;
+		} else {
+			BUG_ON(root->root_key.objectid !=
+			       btrfs_header_owner(path->nodes[level]));
+			parent = 0;
+		}
 
-	if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
-		parent = path->nodes[level]->start;
-	} else {
-		BUG_ON(root->root_key.objectid !=
-		       btrfs_header_owner(path->nodes[level]));
-		parent = 0;
+		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
+					root->root_key.objectid, level - 1, 0);
+		BUG_ON(ret);
 	}
-
-	ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
-				root->root_key.objectid, level - 1, 0);
-	BUG_ON(ret);
-
 	btrfs_tree_unlock(next);
 	free_extent_buffer(next);
+	*lookup_info = 1;
 	return 1;
 }
 
@@ -5164,6 +5305,7 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 			   struct walk_control *wc)
 {
 	int level = wc->level;
+	int lookup_info = 1;
 	int ret;
 
 	while (level >= 0) {
@@ -5171,14 +5313,14 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		    btrfs_header_nritems(path->nodes[level]))
 			break;
 
-		ret = walk_down_proc(trans, root, path, wc);
+		ret = walk_down_proc(trans, root, path, wc, lookup_info);
 		if (ret > 0)
 			break;
 
 		if (level == 0)
 			break;
 
-		ret = do_walk_down(trans, root, path, wc);
+		ret = do_walk_down(trans, root, path, wc, &lookup_info);
 		if (ret > 0) {
 			path->slots[level]++;
 			continue;
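
The flush_delalloc() added above is a single-flusher gate: the first task in claims info->flushing and hands the actual flush to the new enospc_workers pool, while every later task parks on flush_wait until the flush finishes or enough space appears. Stripped to its bones (do_flush() is a hypothetical stand-in for the start/wait delalloc pair):

static void flush_once(struct btrfs_space_info *info)
{
	bool flusher = false;

	spin_lock(&info->lock);
	if (!info->flushing) {
		info->flushing = 1;	/* we won the race to flush */
		init_waitqueue_head(&info->flush_wait);
		flusher = true;
	}
	spin_unlock(&info->lock);

	if (!flusher) {
		wait_on_flush(info);	/* recheck-under-lock wait loop */
		return;
	}

	do_flush(info);			/* start + wait delalloc writeout */

	spin_lock(&info->lock);
	info->flushing = 0;
	spin_unlock(&info->lock);
	wake_up(&info->flush_wait);
}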
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index de1793ba004a..96577e8bf9fd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -460,7 +460,8 @@ static int clear_state_bit(struct extent_io_tree *tree,
 			  struct extent_state *state, int bits, int wake,
 			  int delete)
 {
-	int ret = state->state & bits;
+	int bits_to_clear = bits & ~EXTENT_DO_ACCOUNTING;
+	int ret = state->state & bits_to_clear;
 
 	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 		u64 range = state->end - state->start + 1;
@@ -468,7 +469,7 @@ static int clear_state_bit(struct extent_io_tree *tree,
 		tree->dirty_bytes -= range;
 	}
 	clear_state_cb(tree, state, bits);
-	state->state &= ~bits;
+	state->state &= ~bits_to_clear;
 	if (wake)
 		wake_up(&state->wq);
 	if (delete || state->state == 0) {
@@ -956,7 +957,8 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end,
-				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
+				EXTENT_DIRTY | EXTENT_DELALLOC |
+				EXTENT_DO_ACCOUNTING, 0, 0,
 				NULL, mask);
 }
 
@@ -1401,12 +1403,7 @@ out_failed:
 int extent_clear_unlock_delalloc(struct inode *inode,
 				struct extent_io_tree *tree,
 				u64 start, u64 end, struct page *locked_page,
-				int unlock_pages,
-				int clear_unlock,
-				int clear_delalloc, int clear_dirty,
-				int set_writeback,
-				int end_writeback,
-				int set_private2)
+				unsigned long op)
 {
 	int ret;
 	struct page *pages[16];
@@ -1416,17 +1413,21 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 	int i;
 	int clear_bits = 0;
 
-	if (clear_unlock)
+	if (op & EXTENT_CLEAR_UNLOCK)
 		clear_bits |= EXTENT_LOCKED;
-	if (clear_dirty)
+	if (op & EXTENT_CLEAR_DIRTY)
 		clear_bits |= EXTENT_DIRTY;
 
-	if (clear_delalloc)
+	if (op & EXTENT_CLEAR_DELALLOC)
 		clear_bits |= EXTENT_DELALLOC;
 
+	if (op & EXTENT_CLEAR_ACCOUNTING)
+		clear_bits |= EXTENT_DO_ACCOUNTING;
+
 	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
-	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback ||
-	      set_private2))
+	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
+		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
+		    EXTENT_SET_PRIVATE2)))
 		return 0;
 
 	while (nr_pages > 0) {
@@ -1435,20 +1436,20 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 					nr_pages, ARRAY_SIZE(pages)), pages);
 		for (i = 0; i < ret; i++) {
 
-			if (set_private2)
+			if (op & EXTENT_SET_PRIVATE2)
 				SetPagePrivate2(pages[i]);
 
 			if (pages[i] == locked_page) {
 				page_cache_release(pages[i]);
 				continue;
 			}
-			if (clear_dirty)
+			if (op & EXTENT_CLEAR_DIRTY)
 				clear_page_dirty_for_io(pages[i]);
-			if (set_writeback)
+			if (op & EXTENT_SET_WRITEBACK)
 				set_page_writeback(pages[i]);
-			if (end_writeback)
+			if (op & EXTENT_END_WRITEBACK)
 				end_page_writeback(pages[i]);
-			if (unlock_pages)
+			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
 				unlock_page(pages[i]);
 			page_cache_release(pages[i]);
 		}
@@ -2714,7 +2715,8 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	lock_extent(tree, start, end, GFP_NOFS);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
-			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
+			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
+			 EXTENT_DO_ACCOUNTING,
 			 1, 1, NULL, GFP_NOFS);
 	return 0;
 }
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4794ec891fed..36de250a7b2b 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -15,6 +15,7 @@
 #define EXTENT_BUFFER_FILLED (1 << 8)
 #define EXTENT_BOUNDARY (1 << 9)
 #define EXTENT_NODATASUM (1 << 10)
+#define EXTENT_DO_ACCOUNTING (1 << 11)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 
 /* flags for bio submission */
@@ -25,6 +26,16 @@
 #define EXTENT_BUFFER_BLOCKING 1
 #define EXTENT_BUFFER_DIRTY 2
 
+/* these are flags for extent_clear_unlock_delalloc */
+#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
+#define EXTENT_CLEAR_UNLOCK	 0x2
+#define EXTENT_CLEAR_DELALLOC	 0x4
+#define EXTENT_CLEAR_DIRTY	 0x8
+#define EXTENT_SET_WRITEBACK	 0x10
+#define EXTENT_END_WRITEBACK	 0x20
+#define EXTENT_SET_PRIVATE2	 0x40
+#define EXTENT_CLEAR_ACCOUNTING  0x80
+
 /*
  * page->private values. Every page that is controlled by the extent
  * map has page->private set to one.
@@ -288,10 +299,5 @@ int extent_range_uptodate(struct extent_io_tree *tree,
288int extent_clear_unlock_delalloc(struct inode *inode, 299int extent_clear_unlock_delalloc(struct inode *inode,
289 struct extent_io_tree *tree, 300 struct extent_io_tree *tree,
290 u64 start, u64 end, struct page *locked_page, 301 u64 start, u64 end, struct page *locked_page,
291 int unlock_page, 302 unsigned long op);
292 int clear_unlock,
293 int clear_delalloc, int clear_dirty,
294 int set_writeback,
295 int end_writeback,
296 int set_private2);
297#endif 303#endif
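
The extent_io.h change above collapses seven separate int parameters of extent_clear_unlock_delalloc() into a single unsigned long bitmask, so callers name the operations they want instead of passing positional 0/1 arguments. A minimal userspace sketch of the pattern, with the flag values copied from the hunk and the page calls stubbed out with printf():

    /* Illustrative only: mirrors the EXTENT_CLEAR_* values defined above. */
    #include <stdio.h>

    #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
    #define EXTENT_CLEAR_UNLOCK      0x2
    #define EXTENT_CLEAR_DELALLOC    0x4
    #define EXTENT_CLEAR_DIRTY       0x8
    #define EXTENT_SET_WRITEBACK     0x10
    #define EXTENT_END_WRITEBACK     0x20
    #define EXTENT_SET_PRIVATE2      0x40
    #define EXTENT_CLEAR_ACCOUNTING  0x80

    static void handle_page(unsigned long op)
    {
        if (op & EXTENT_CLEAR_DIRTY)
            printf("clear_page_dirty_for_io()\n");
        if (op & EXTENT_SET_WRITEBACK)
            printf("set_page_writeback()\n");
        if (op & EXTENT_END_WRITEBACK)
            printf("end_page_writeback()\n");
        if (op & EXTENT_CLEAR_UNLOCK_PAGE)
            printf("unlock_page()\n");
    }

    int main(void)
    {
        /* one caller-built word replaces seven int arguments */
        unsigned long op = EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
                           EXTENT_SET_WRITEBACK;
        handle_page(op);
        return 0;
    }
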
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f19e1259a971..06550affbd27 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -878,7 +878,8 @@ again:
878 btrfs_put_ordered_extent(ordered); 878 btrfs_put_ordered_extent(ordered);
879 879
880 clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos, 880 clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
881 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC, 881 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
882 EXTENT_DO_ACCOUNTING,
882 GFP_NOFS); 883 GFP_NOFS);
883 unlock_extent(&BTRFS_I(inode)->io_tree, 884 unlock_extent(&BTRFS_I(inode)->io_tree,
884 start_pos, last_pos - 1, GFP_NOFS); 885 start_pos, last_pos - 1, GFP_NOFS);
@@ -1085,8 +1086,10 @@ out_nolock:
1085 btrfs_end_transaction(trans, root); 1086 btrfs_end_transaction(trans, root);
1086 else 1087 else
1087 btrfs_commit_transaction(trans, root); 1088 btrfs_commit_transaction(trans, root);
1088 } else { 1089 } else if (ret != BTRFS_NO_LOG_SYNC) {
1089 btrfs_commit_transaction(trans, root); 1090 btrfs_commit_transaction(trans, root);
1091 } else {
1092 btrfs_end_transaction(trans, root);
1090 } 1093 }
1091 } 1094 }
1092 if (file->f_flags & O_DIRECT) { 1095 if (file->f_flags & O_DIRECT) {
@@ -1136,6 +1139,13 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1136 int ret = 0; 1139 int ret = 0;
1137 struct btrfs_trans_handle *trans; 1140 struct btrfs_trans_handle *trans;
1138 1141
1142
1143 /* we wait first, since the writeback may change the inode */
1144 root->log_batch++;
1145 /* the VFS called filemap_fdatawrite for us */
1146 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1147 root->log_batch++;
1148
1139 /* 1149 /*
1140 * check the transaction that last modified this inode 1150 * check the transaction that last modified this inode
1141 * and see if it's already been committed 1151 * and see if it's already been committed
@@ -1143,6 +1153,11 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1143 if (!BTRFS_I(inode)->last_trans) 1153 if (!BTRFS_I(inode)->last_trans)
1144 goto out; 1154 goto out;
1145 1155
1156 /*
1157 * if the last transaction that changed this file was before
1158 * the current transaction, we can bail out now without any
1159 * syncing
1160 */
1146 mutex_lock(&root->fs_info->trans_mutex); 1161 mutex_lock(&root->fs_info->trans_mutex);
1147 if (BTRFS_I(inode)->last_trans <= 1162 if (BTRFS_I(inode)->last_trans <=
1148 root->fs_info->last_trans_committed) { 1163 root->fs_info->last_trans_committed) {
@@ -1152,13 +1167,6 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1152 } 1167 }
1153 mutex_unlock(&root->fs_info->trans_mutex); 1168 mutex_unlock(&root->fs_info->trans_mutex);
1154 1169
1155 root->log_batch++;
1156 filemap_fdatawrite(inode->i_mapping);
1157 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1158 root->log_batch++;
1159
1160 if (datasync && !(inode->i_state & I_DIRTY_PAGES))
1161 goto out;
1162 /* 1170 /*
1163 * ok we haven't committed the transaction yet, lets do a commit 1171 * ok we haven't committed the transaction yet, lets do a commit
1164 */ 1172 */
@@ -1187,14 +1195,18 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1187 */ 1195 */
1188 mutex_unlock(&dentry->d_inode->i_mutex); 1196 mutex_unlock(&dentry->d_inode->i_mutex);
1189 1197
1190 if (ret > 0) { 1198 if (ret != BTRFS_NO_LOG_SYNC) {
1191 ret = btrfs_commit_transaction(trans, root); 1199 if (ret > 0) {
1192 } else {
1193 ret = btrfs_sync_log(trans, root);
1194 if (ret == 0)
1195 ret = btrfs_end_transaction(trans, root);
1196 else
1197 ret = btrfs_commit_transaction(trans, root); 1200 ret = btrfs_commit_transaction(trans, root);
1201 } else {
1202 ret = btrfs_sync_log(trans, root);
1203 if (ret == 0)
1204 ret = btrfs_end_transaction(trans, root);
1205 else
1206 ret = btrfs_commit_transaction(trans, root);
1207 }
1208 } else {
1209 ret = btrfs_end_transaction(trans, root);
1198 } 1210 }
1199 mutex_lock(&dentry->d_inode->i_mutex); 1211 mutex_lock(&dentry->d_inode->i_mutex);
1200out: 1212out:
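
Taken together with the tree-log changes later in this diff, btrfs_sync_file() now distinguishes three outcomes: logging failed (ret > 0, fall back to a full commit), logging succeeded (sync the log, commit only if that fails), and BTRFS_NO_LOG_SYNC (the inode is already safely in a committed log, so only end the transaction). A compilable sketch of that decision, with the transaction helpers reduced to stubs:

    /* Stubs stand in for the real transaction calls; BTRFS_NO_LOG_SYNC
     * is the sentinel defined in tree-log.h further down this diff. */
    #include <stdio.h>

    #define BTRFS_NO_LOG_SYNC 256

    static int commit_transaction(void) { puts("full commit"); return 0; }
    static int end_transaction(void)    { puts("end only");    return 0; }
    static int sync_log(void)           { puts("sync log");    return 0; }

    static int finish_sync(int ret)
    {
        if (ret != BTRFS_NO_LOG_SYNC) {
            if (ret > 0)              /* logging failed: fall back to commit */
                return commit_transaction();
            if (sync_log() == 0)      /* log synced: the cheap path */
                return end_transaction();
            return commit_transaction();
        }
        /* inode already safely in the log: no sync work at all */
        return end_transaction();
    }

    int main(void)
    {
        finish_sync(BTRFS_NO_LOG_SYNC);
        finish_sync(0);
        return 0;
    }
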
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 112e5aa85892..dae12dc7e159 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -424,9 +424,12 @@ again:
424 * and free up our temp pages. 424 * and free up our temp pages.
425 */ 425 */
426 extent_clear_unlock_delalloc(inode, 426 extent_clear_unlock_delalloc(inode,
427 &BTRFS_I(inode)->io_tree, 427 &BTRFS_I(inode)->io_tree,
428 start, end, NULL, 1, 0, 428 start, end, NULL,
429 0, 1, 1, 1, 0); 429 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
430 EXTENT_CLEAR_DELALLOC |
431 EXTENT_CLEAR_ACCOUNTING |
432 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
430 ret = 0; 433 ret = 0;
431 goto free_pages_out; 434 goto free_pages_out;
432 } 435 }
@@ -637,11 +640,14 @@ static noinline int submit_compressed_extents(struct inode *inode,
637 * clear dirty, set writeback and unlock the pages. 640 * clear dirty, set writeback and unlock the pages.
638 */ 641 */
639 extent_clear_unlock_delalloc(inode, 642 extent_clear_unlock_delalloc(inode,
640 &BTRFS_I(inode)->io_tree, 643 &BTRFS_I(inode)->io_tree,
641 async_extent->start, 644 async_extent->start,
642 async_extent->start + 645 async_extent->start +
643 async_extent->ram_size - 1, 646 async_extent->ram_size - 1,
644 NULL, 1, 1, 0, 1, 1, 0, 0); 647 NULL, EXTENT_CLEAR_UNLOCK_PAGE |
648 EXTENT_CLEAR_UNLOCK |
649 EXTENT_CLEAR_DELALLOC |
650 EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
645 651
646 ret = btrfs_submit_compressed_write(inode, 652 ret = btrfs_submit_compressed_write(inode,
647 async_extent->start, 653 async_extent->start,
@@ -712,9 +718,15 @@ static noinline int cow_file_range(struct inode *inode,
712 start, end, 0, NULL); 718 start, end, 0, NULL);
713 if (ret == 0) { 719 if (ret == 0) {
714 extent_clear_unlock_delalloc(inode, 720 extent_clear_unlock_delalloc(inode,
715 &BTRFS_I(inode)->io_tree, 721 &BTRFS_I(inode)->io_tree,
716 start, end, NULL, 1, 1, 722 start, end, NULL,
717 1, 1, 1, 1, 0); 723 EXTENT_CLEAR_UNLOCK_PAGE |
724 EXTENT_CLEAR_UNLOCK |
725 EXTENT_CLEAR_DELALLOC |
726 EXTENT_CLEAR_ACCOUNTING |
727 EXTENT_CLEAR_DIRTY |
728 EXTENT_SET_WRITEBACK |
729 EXTENT_END_WRITEBACK);
718 *nr_written = *nr_written + 730 *nr_written = *nr_written +
719 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 731 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
720 *page_started = 1; 732 *page_started = 1;
@@ -738,6 +750,8 @@ static noinline int cow_file_range(struct inode *inode,
738 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 750 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
739 751
740 while (disk_num_bytes > 0) { 752 while (disk_num_bytes > 0) {
753 unsigned long op;
754
741 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); 755 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
742 ret = btrfs_reserve_extent(trans, root, cur_alloc_size, 756 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
743 root->sectorsize, 0, alloc_hint, 757 root->sectorsize, 0, alloc_hint,
@@ -789,10 +803,13 @@ static noinline int cow_file_range(struct inode *inode,
789 * Do set the Private2 bit so we know this page was properly 803 * Do set the Private2 bit so we know this page was properly
790 * setup for writepage 804 * setup for writepage
791 */ 805 */
806 op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
807 op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
808 EXTENT_SET_PRIVATE2;
809
792 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 810 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
793 start, start + ram_size - 1, 811 start, start + ram_size - 1,
794 locked_page, unlock, 1, 812 locked_page, op);
795 1, 0, 0, 0, 1);
796 disk_num_bytes -= cur_alloc_size; 813 disk_num_bytes -= cur_alloc_size;
797 num_bytes -= cur_alloc_size; 814 num_bytes -= cur_alloc_size;
798 alloc_hint = ins.objectid + ins.offset; 815 alloc_hint = ins.objectid + ins.offset;
@@ -864,8 +881,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
864 u64 cur_end; 881 u64 cur_end;
865 int limit = 10 * 1024 * 1042; 882 int limit = 10 * 1024 * 1042;
866 883
867 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | 884 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
868 EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS); 885 1, 0, NULL, GFP_NOFS);
869 while (start < end) { 886 while (start < end) {
870 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 887 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
871 async_cow->inode = inode; 888 async_cow->inode = inode;
@@ -1006,6 +1023,7 @@ next_slot:
1006 1023
1007 if (found_key.offset > cur_offset) { 1024 if (found_key.offset > cur_offset) {
1008 extent_end = found_key.offset; 1025 extent_end = found_key.offset;
1026 extent_type = 0;
1009 goto out_check; 1027 goto out_check;
1010 } 1028 }
1011 1029
@@ -1112,8 +1130,10 @@ out_check:
1112 BUG_ON(ret); 1130 BUG_ON(ret);
1113 1131
1114 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 1132 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1115 cur_offset, cur_offset + num_bytes - 1, 1133 cur_offset, cur_offset + num_bytes - 1,
1116 locked_page, 1, 1, 1, 0, 0, 0, 1); 1134 locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1135 EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1136 EXTENT_SET_PRIVATE2);
1117 cur_offset = extent_end; 1137 cur_offset = extent_end;
1118 if (cur_offset > end) 1138 if (cur_offset > end)
1119 break; 1139 break;
@@ -1178,15 +1198,17 @@ static int btrfs_split_extent_hook(struct inode *inode,
1178 root->fs_info->max_extent); 1198 root->fs_info->max_extent);
1179 1199
1180 /* 1200 /*
1181 * if we break a large extent up then leave delalloc_extents be, 1201 * if we break a large extent up then leave outstanding_extents
1182 * since we've already accounted for the large extent. 1202 * be, since we've already accounted for the large extent.
1183 */ 1203 */
1184 if (div64_u64(new_size + root->fs_info->max_extent - 1, 1204 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1185 root->fs_info->max_extent) < num_extents) 1205 root->fs_info->max_extent) < num_extents)
1186 return 0; 1206 return 0;
1187 } 1207 }
1188 1208
1189 BTRFS_I(inode)->delalloc_extents++; 1209 spin_lock(&BTRFS_I(inode)->accounting_lock);
1210 BTRFS_I(inode)->outstanding_extents++;
1211 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1190 1212
1191 return 0; 1213 return 0;
1192} 1214}
@@ -1217,7 +1239,9 @@ static int btrfs_merge_extent_hook(struct inode *inode,
1217 1239
1218 /* we're not bigger than the max, unreserve the space and go */ 1240 /* we're not bigger than the max, unreserve the space and go */
1219 if (new_size <= root->fs_info->max_extent) { 1241 if (new_size <= root->fs_info->max_extent) {
1220 BTRFS_I(inode)->delalloc_extents--; 1242 spin_lock(&BTRFS_I(inode)->accounting_lock);
1243 BTRFS_I(inode)->outstanding_extents--;
1244 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1221 return 0; 1245 return 0;
1222 } 1246 }
1223 1247
@@ -1231,7 +1255,9 @@ static int btrfs_merge_extent_hook(struct inode *inode,
1231 root->fs_info->max_extent) > num_extents) 1255 root->fs_info->max_extent) > num_extents)
1232 return 0; 1256 return 0;
1233 1257
1234 BTRFS_I(inode)->delalloc_extents--; 1258 spin_lock(&BTRFS_I(inode)->accounting_lock);
1259 BTRFS_I(inode)->outstanding_extents--;
1260 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1235 1261
1236 return 0; 1262 return 0;
1237} 1263}
@@ -1253,7 +1279,9 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1253 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1279 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1254 struct btrfs_root *root = BTRFS_I(inode)->root; 1280 struct btrfs_root *root = BTRFS_I(inode)->root;
1255 1281
1256 BTRFS_I(inode)->delalloc_extents++; 1282 spin_lock(&BTRFS_I(inode)->accounting_lock);
1283 BTRFS_I(inode)->outstanding_extents++;
1284 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1257 btrfs_delalloc_reserve_space(root, inode, end - start + 1); 1285 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1258 spin_lock(&root->fs_info->delalloc_lock); 1286 spin_lock(&root->fs_info->delalloc_lock);
1259 BTRFS_I(inode)->delalloc_bytes += end - start + 1; 1287 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
@@ -1281,8 +1309,12 @@ static int btrfs_clear_bit_hook(struct inode *inode,
1281 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1309 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1282 struct btrfs_root *root = BTRFS_I(inode)->root; 1310 struct btrfs_root *root = BTRFS_I(inode)->root;
1283 1311
1284 BTRFS_I(inode)->delalloc_extents--; 1312 if (bits & EXTENT_DO_ACCOUNTING) {
1285 btrfs_unreserve_metadata_for_delalloc(root, inode, 1); 1313 spin_lock(&BTRFS_I(inode)->accounting_lock);
1314 BTRFS_I(inode)->outstanding_extents--;
1315 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1316 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1317 }
1286 1318
1287 spin_lock(&root->fs_info->delalloc_lock); 1319 spin_lock(&root->fs_info->delalloc_lock);
1288 if (state->end - state->start + 1 > 1320 if (state->end - state->start + 1 >
@@ -3000,12 +3032,22 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3000 3032
3001 if ((offset & (blocksize - 1)) == 0) 3033 if ((offset & (blocksize - 1)) == 0)
3002 goto out; 3034 goto out;
3035 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3036 if (ret)
3037 goto out;
3038
3039 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3040 if (ret)
3041 goto out;
3003 3042
3004 ret = -ENOMEM; 3043 ret = -ENOMEM;
3005again: 3044again:
3006 page = grab_cache_page(mapping, index); 3045 page = grab_cache_page(mapping, index);
3007 if (!page) 3046 if (!page) {
3047 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3048 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3008 goto out; 3049 goto out;
3050 }
3009 3051
3010 page_start = page_offset(page); 3052 page_start = page_offset(page);
3011 page_end = page_start + PAGE_CACHE_SIZE - 1; 3053 page_end = page_start + PAGE_CACHE_SIZE - 1;
@@ -3038,6 +3080,10 @@ again:
3038 goto again; 3080 goto again;
3039 } 3081 }
3040 3082
3083 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
3084 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3085 GFP_NOFS);
3086
3041 ret = btrfs_set_extent_delalloc(inode, page_start, page_end); 3087 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3042 if (ret) { 3088 if (ret) {
3043 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3089 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
@@ -3056,6 +3102,9 @@ again:
3056 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3102 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3057 3103
3058out_unlock: 3104out_unlock:
3105 if (ret)
3106 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3107 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3059 unlock_page(page); 3108 unlock_page(page);
3060 page_cache_release(page); 3109 page_cache_release(page);
3061out: 3110out:
@@ -3079,7 +3128,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3079 if (size <= hole_start) 3128 if (size <= hole_start)
3080 return 0; 3129 return 0;
3081 3130
3082 btrfs_truncate_page(inode->i_mapping, inode->i_size); 3131 err = btrfs_truncate_page(inode->i_mapping, inode->i_size);
3132 if (err)
3133 return err;
3083 3134
3084 while (1) { 3135 while (1) {
3085 struct btrfs_ordered_extent *ordered; 3136 struct btrfs_ordered_extent *ordered;
@@ -3448,6 +3499,7 @@ static noinline void init_btrfs_i(struct inode *inode)
3448 bi->generation = 0; 3499 bi->generation = 0;
3449 bi->sequence = 0; 3500 bi->sequence = 0;
3450 bi->last_trans = 0; 3501 bi->last_trans = 0;
3502 bi->last_sub_trans = 0;
3451 bi->logged_trans = 0; 3503 bi->logged_trans = 0;
3452 bi->delalloc_bytes = 0; 3504 bi->delalloc_bytes = 0;
3453 bi->reserved_bytes = 0; 3505 bi->reserved_bytes = 0;
@@ -3598,12 +3650,14 @@ static int btrfs_dentry_delete(struct dentry *dentry)
3598{ 3650{
3599 struct btrfs_root *root; 3651 struct btrfs_root *root;
3600 3652
3601 if (!dentry->d_inode) 3653 if (!dentry->d_inode && !IS_ROOT(dentry))
3602 return 0; 3654 dentry = dentry->d_parent;
3603 3655
3604 root = BTRFS_I(dentry->d_inode)->root; 3656 if (dentry->d_inode) {
3605 if (btrfs_root_refs(&root->root_item) == 0) 3657 root = BTRFS_I(dentry->d_inode)->root;
3606 return 1; 3658 if (btrfs_root_refs(&root->root_item) == 0)
3659 return 1;
3660 }
3607 return 0; 3661 return 0;
3608} 3662}
3609 3663
@@ -4808,7 +4862,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4808 */ 4862 */
4809 clear_extent_bit(tree, page_start, page_end, 4863 clear_extent_bit(tree, page_start, page_end,
4810 EXTENT_DIRTY | EXTENT_DELALLOC | 4864 EXTENT_DIRTY | EXTENT_DELALLOC |
4811 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS); 4865 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
4866 NULL, GFP_NOFS);
4812 /* 4867 /*
4813 * whoever cleared the private bit is responsible 4868 * whoever cleared the private bit is responsible
4814 * for the finish_ordered_io 4869 * for the finish_ordered_io
@@ -4821,8 +4876,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4821 lock_extent(tree, page_start, page_end, GFP_NOFS); 4876 lock_extent(tree, page_start, page_end, GFP_NOFS);
4822 } 4877 }
4823 clear_extent_bit(tree, page_start, page_end, 4878 clear_extent_bit(tree, page_start, page_end,
4824 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC, 4879 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4825 1, 1, NULL, GFP_NOFS); 4880 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
4826 __btrfs_releasepage(page, GFP_NOFS); 4881 __btrfs_releasepage(page, GFP_NOFS);
4827 4882
4828 ClearPageChecked(page); 4883 ClearPageChecked(page);
@@ -4917,7 +4972,8 @@ again:
4917 * prepare_pages in the normal write path. 4972 * prepare_pages in the normal write path.
4918 */ 4973 */
4919 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 4974 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
4920 EXTENT_DIRTY | EXTENT_DELALLOC, GFP_NOFS); 4975 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
4976 GFP_NOFS);
4921 4977
4922 ret = btrfs_set_extent_delalloc(inode, page_start, page_end); 4978 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
4923 if (ret) { 4979 if (ret) {
@@ -4944,7 +5000,9 @@ again:
4944 set_page_dirty(page); 5000 set_page_dirty(page);
4945 SetPageUptodate(page); 5001 SetPageUptodate(page);
4946 5002
4947 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; 5003 BTRFS_I(inode)->last_trans = root->fs_info->generation;
5004 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5005
4948 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5006 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4949 5007
4950out_unlock: 5008out_unlock:
@@ -4969,7 +5027,9 @@ static void btrfs_truncate(struct inode *inode)
4969 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 5027 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4970 return; 5028 return;
4971 5029
4972 btrfs_truncate_page(inode->i_mapping, inode->i_size); 5030 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5031 if (ret)
5032 return;
4973 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 5033 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4974 5034
4975 trans = btrfs_start_transaction(root, 1); 5035 trans = btrfs_start_transaction(root, 1);
@@ -5064,9 +5124,11 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
5064 if (!ei) 5124 if (!ei)
5065 return NULL; 5125 return NULL;
5066 ei->last_trans = 0; 5126 ei->last_trans = 0;
5127 ei->last_sub_trans = 0;
5067 ei->logged_trans = 0; 5128 ei->logged_trans = 0;
5068 ei->delalloc_extents = 0; 5129 ei->outstanding_extents = 0;
5069 ei->delalloc_reserved_extents = 0; 5130 ei->reserved_extents = 0;
5131 spin_lock_init(&ei->accounting_lock);
5070 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 5132 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5071 INIT_LIST_HEAD(&ei->i_orphan); 5133 INIT_LIST_HEAD(&ei->i_orphan);
5072 INIT_LIST_HEAD(&ei->ordered_operations); 5134 INIT_LIST_HEAD(&ei->ordered_operations);
@@ -5805,6 +5867,6 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
5805 .removexattr = btrfs_removexattr, 5867 .removexattr = btrfs_removexattr,
5806}; 5868};
5807 5869
5808struct dentry_operations btrfs_dentry_operations = { 5870const struct dentry_operations btrfs_dentry_operations = {
5809 .d_delete = btrfs_dentry_delete, 5871 .d_delete = btrfs_dentry_delete,
5810}; 5872};
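
Several inode.c hunks above rename delalloc_extents to outstanding_extents and wrap every update in the new accounting_lock. A userspace analog of that discipline, modelling the spinlock with a pthread mutex (build with -pthread); the struct and function names are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    struct inode_accounting {
        pthread_mutex_t lock;           /* stands in for accounting_lock */
        unsigned int outstanding_extents;
    };

    static void extent_added(struct inode_accounting *a)
    {
        pthread_mutex_lock(&a->lock);
        a->outstanding_extents++;
        pthread_mutex_unlock(&a->lock);
    }

    static void extent_completed(struct inode_accounting *a)
    {
        pthread_mutex_lock(&a->lock);
        a->outstanding_extents--;
        pthread_mutex_unlock(&a->lock);
    }

    int main(void)
    {
        struct inode_accounting a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        extent_added(&a);
        extent_completed(&a);
        printf("outstanding: %u\n", a.outstanding_extents);
        return 0;
    }
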
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 9a780c8d0ac8..cdbb054102b9 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -830,6 +830,7 @@ out_up_write:
830out_unlock: 830out_unlock:
831 mutex_unlock(&inode->i_mutex); 831 mutex_unlock(&inode->i_mutex);
832 if (!err) { 832 if (!err) {
833 shrink_dcache_sb(root->fs_info->sb);
833 btrfs_invalidate_inodes(dest); 834 btrfs_invalidate_inodes(dest);
834 d_delete(dentry); 835 d_delete(dentry);
835 } 836 }
@@ -1122,8 +1123,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1122 datao += off - key.offset; 1123 datao += off - key.offset;
1123 datal -= off - key.offset; 1124 datal -= off - key.offset;
1124 } 1125 }
1125 if (key.offset + datao + datal > off + len) 1126
1126 datal = off + len - key.offset - datao; 1127 if (key.offset + datal > off + len)
1128 datal = off + len - key.offset;
1129
1127 /* disko == 0 means it's a hole */ 1130 /* disko == 0 means it's a hole */
1128 if (!disko) 1131 if (!disko)
1129 datao = 0; 1132 datao = 0;
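
The clone-range fix above clamps the copied length against the file-offset window [off, off + len) relative to key.offset alone; the old code wrongly folded the on-disk offset datao into the comparison. A standalone check with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long off = 4096, len = 8192;  /* clone window  */
        unsigned long long key_offset = 8192;       /* extent start  */
        unsigned long long datal = 16384;           /* extent length */

        if (key_offset + datal > off + len)
            datal = off + len - key_offset;         /* -> 4096 */

        printf("clamped datal = %llu\n", datal);
        return 0;
    }
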
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 897fba835f89..5799bc46a309 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -306,6 +306,12 @@ int btrfs_remove_ordered_extent(struct inode *inode,
306 tree->last = NULL; 306 tree->last = NULL;
307 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); 307 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
308 308
309 spin_lock(&BTRFS_I(inode)->accounting_lock);
310 BTRFS_I(inode)->outstanding_extents--;
311 spin_unlock(&BTRFS_I(inode)->accounting_lock);
312 btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root,
313 inode, 1);
314
309 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 315 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
310 list_del_init(&entry->root_extent_list); 316 list_del_init(&entry->root_extent_list);
311 317
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 361ad323faac..cfcc93c93a7b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3518,7 +3518,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
3518 BUG_ON(!rc->block_group); 3518 BUG_ON(!rc->block_group);
3519 3519
3520 btrfs_init_workers(&rc->workers, "relocate", 3520 btrfs_init_workers(&rc->workers, "relocate",
3521 fs_info->thread_pool_size); 3521 fs_info->thread_pool_size, NULL);
3522 3522
3523 rc->extent_root = extent_root; 3523 rc->extent_root = extent_root;
3524 btrfs_prepare_block_group_relocation(extent_root, rc->block_group); 3524 btrfs_prepare_block_group_relocation(extent_root, rc->block_group);
@@ -3701,7 +3701,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
3701 mapping_tree_init(&rc->reloc_root_tree); 3701 mapping_tree_init(&rc->reloc_root_tree);
3702 INIT_LIST_HEAD(&rc->reloc_roots); 3702 INIT_LIST_HEAD(&rc->reloc_roots);
3703 btrfs_init_workers(&rc->workers, "relocate", 3703 btrfs_init_workers(&rc->workers, "relocate",
3704 root->fs_info->thread_pool_size); 3704 root->fs_info->thread_pool_size, NULL);
3705 rc->extent_root = root->fs_info->extent_root; 3705 rc->extent_root = root->fs_info->extent_root;
3706 3706
3707 set_reloc_control(rc); 3707 set_reloc_control(rc);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9de9b2236419..752a5463bf53 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,7 +66,8 @@ enum {
66 Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, 66 Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
67 Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, 67 Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
68 Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, 68 Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
69 Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_err, 69 Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
70 Opt_discard, Opt_err,
70}; 71};
71 72
72static match_table_t tokens = { 73static match_table_t tokens = {
@@ -88,6 +89,7 @@ static match_table_t tokens = {
88 {Opt_notreelog, "notreelog"}, 89 {Opt_notreelog, "notreelog"},
89 {Opt_flushoncommit, "flushoncommit"}, 90 {Opt_flushoncommit, "flushoncommit"},
90 {Opt_ratio, "metadata_ratio=%d"}, 91 {Opt_ratio, "metadata_ratio=%d"},
92 {Opt_discard, "discard"},
91 {Opt_err, NULL}, 93 {Opt_err, NULL},
92}; 94};
93 95
@@ -257,6 +259,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
257 info->metadata_ratio); 259 info->metadata_ratio);
258 } 260 }
259 break; 261 break;
262 case Opt_discard:
263 btrfs_set_opt(info->mount_opt, DISCARD);
264 break;
260 default: 265 default:
261 break; 266 break;
262 } 267 }
@@ -344,7 +349,7 @@ static int btrfs_fill_super(struct super_block *sb,
344 sb->s_export_op = &btrfs_export_ops; 349 sb->s_export_op = &btrfs_export_ops;
345 sb->s_xattr = btrfs_xattr_handlers; 350 sb->s_xattr = btrfs_xattr_handlers;
346 sb->s_time_gran = 1; 351 sb->s_time_gran = 1;
347#ifdef CONFIG_BTRFS_POSIX_ACL 352#ifdef CONFIG_BTRFS_FS_POSIX_ACL
348 sb->s_flags |= MS_POSIXACL; 353 sb->s_flags |= MS_POSIXACL;
349#endif 354#endif
350 355
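
The super.c hunks wire up a new "discard" mount option in three parts: a token enum entry, a match table string, and a case that sets a bit in mount_opt. A minimal sketch of the pattern, with match_token() replaced by strcmp() and an illustrative bit value, since the real BTRFS_MOUNT_DISCARD definition lives outside this diff:

    #include <stdio.h>
    #include <string.h>

    enum { Opt_discard, Opt_err };

    static const struct { int token; const char *pattern; } tokens[] = {
        { Opt_discard, "discard" },
        { Opt_err, NULL },
    };

    #define BTRFS_MOUNT_DISCARD (1 << 0)   /* illustrative bit value */

    int main(void)
    {
        unsigned long mount_opt = 0;
        const char *opt = "discard";
        int i, token = Opt_err;

        for (i = 0; tokens[i].pattern; i++)
            if (!strcmp(opt, tokens[i].pattern))
                token = tokens[i].token;

        switch (token) {
        case Opt_discard:
            mount_opt |= BTRFS_MOUNT_DISCARD;
            break;
        default:
            break;
        }
        printf("mount_opt = 0x%lx\n", mount_opt);
        return 0;
    }
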
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 0b8f36d4400a..bca82a4ca8e6 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -344,10 +344,10 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
344/* 344/*
345 * when btree blocks are allocated, they have some corresponding bits set for 345 * when btree blocks are allocated, they have some corresponding bits set for
346 * them in one of two extent_io trees. This is used to make sure all of 346 * them in one of two extent_io trees. This is used to make sure all of
347 * those extents are on disk for transaction or log commit 347 * those extents are sent to disk but does not wait on them
348 */ 348 */
349int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 349int btrfs_write_marked_extents(struct btrfs_root *root,
350 struct extent_io_tree *dirty_pages) 350 struct extent_io_tree *dirty_pages)
351{ 351{
352 int ret; 352 int ret;
353 int err = 0; 353 int err = 0;
@@ -394,6 +394,29 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
394 page_cache_release(page); 394 page_cache_release(page);
395 } 395 }
396 } 396 }
397 if (err)
398 werr = err;
399 return werr;
400}
401
402/*
403 * when btree blocks are allocated, they have some corresponding bits set for
404 * them in one of two extent_io trees. This is used to make sure all of
405 * those extents are on disk for transaction or log commit. We wait
406 * on all the pages and clear them from the dirty pages state tree
407 */
408int btrfs_wait_marked_extents(struct btrfs_root *root,
409 struct extent_io_tree *dirty_pages)
410{
411 int ret;
412 int err = 0;
413 int werr = 0;
414 struct page *page;
415 struct inode *btree_inode = root->fs_info->btree_inode;
416 u64 start = 0;
417 u64 end;
418 unsigned long index;
419
397 while (1) { 420 while (1) {
398 ret = find_first_extent_bit(dirty_pages, 0, &start, &end, 421 ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
399 EXTENT_DIRTY); 422 EXTENT_DIRTY);
@@ -424,6 +447,22 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
424 return werr; 447 return werr;
425} 448}
426 449
450/*
451 * when btree blocks are allocated, they have some corresponding bits set for
452 * them in one of two extent_io trees. This is used to make sure all of
453 * those extents are on disk for transaction or log commit
454 */
455int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
456 struct extent_io_tree *dirty_pages)
457{
458 int ret;
459 int ret2;
460
461 ret = btrfs_write_marked_extents(root, dirty_pages);
462 ret2 = btrfs_wait_marked_extents(root, dirty_pages);
463 return ret || ret2;
464}
465
427int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 466int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
428 struct btrfs_root *root) 467 struct btrfs_root *root)
429{ 468{
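
btrfs_write_and_wait_marked_extents() is now just the two new halves called back to back; the point of the split is that a caller such as btrfs_sync_log() can start the IO, do other commit work, and wait later. A stub sketch of both uses:

    #include <stdio.h>

    static int write_marked(void) { puts("start IO");    return 0; }
    static int wait_marked(void)  { puts("wait for IO"); return 0; }

    static int write_and_wait_marked(void)
    {
        int ret = write_marked();
        int ret2 = wait_marked();
        return ret || ret2;     /* nonzero if either half failed */
    }

    int main(void)
    {
        /* split form: overlap the IO with other work */
        write_marked();
        puts("... do other commit work here ...");
        wait_marked();

        /* combined form: the old behaviour */
        return write_and_wait_marked();
    }
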
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 663c67404918..d4e3e7a6938c 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -79,6 +79,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
79 struct inode *inode) 79 struct inode *inode)
80{ 80{
81 BTRFS_I(inode)->last_trans = trans->transaction->transid; 81 BTRFS_I(inode)->last_trans = trans->transaction->transid;
82 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
82} 83}
83 84
84int btrfs_end_transaction(struct btrfs_trans_handle *trans, 85int btrfs_end_transaction(struct btrfs_trans_handle *trans,
@@ -107,5 +108,9 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root); 108 struct btrfs_root *root);
108int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 109int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
109 struct extent_io_tree *dirty_pages); 110 struct extent_io_tree *dirty_pages);
111int btrfs_write_marked_extents(struct btrfs_root *root,
112 struct extent_io_tree *dirty_pages);
113int btrfs_wait_marked_extents(struct btrfs_root *root,
114 struct extent_io_tree *dirty_pages);
110int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 115int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
111#endif 116#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 7827841b55cb..741666a7676a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -137,11 +137,20 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
137 137
138 mutex_lock(&root->log_mutex); 138 mutex_lock(&root->log_mutex);
139 if (root->log_root) { 139 if (root->log_root) {
140 if (!root->log_start_pid) {
141 root->log_start_pid = current->pid;
142 root->log_multiple_pids = false;
143 } else if (root->log_start_pid != current->pid) {
144 root->log_multiple_pids = true;
145 }
146
140 root->log_batch++; 147 root->log_batch++;
141 atomic_inc(&root->log_writers); 148 atomic_inc(&root->log_writers);
142 mutex_unlock(&root->log_mutex); 149 mutex_unlock(&root->log_mutex);
143 return 0; 150 return 0;
144 } 151 }
152 root->log_multiple_pids = false;
153 root->log_start_pid = current->pid;
145 mutex_lock(&root->fs_info->tree_log_mutex); 154 mutex_lock(&root->fs_info->tree_log_mutex);
146 if (!root->fs_info->log_root_tree) { 155 if (!root->fs_info->log_root_tree) {
147 ret = btrfs_init_log_root_tree(trans, root->fs_info); 156 ret = btrfs_init_log_root_tree(trans, root->fs_info);
@@ -1971,6 +1980,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1971 int ret; 1980 int ret;
1972 struct btrfs_root *log = root->log_root; 1981 struct btrfs_root *log = root->log_root;
1973 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; 1982 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
1983 u64 log_transid = 0;
1974 1984
1975 mutex_lock(&root->log_mutex); 1985 mutex_lock(&root->log_mutex);
1976 index1 = root->log_transid % 2; 1986 index1 = root->log_transid % 2;
@@ -1987,10 +1997,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1987 1997
1988 while (1) { 1998 while (1) {
1989 unsigned long batch = root->log_batch; 1999 unsigned long batch = root->log_batch;
1990 mutex_unlock(&root->log_mutex); 2000 if (root->log_multiple_pids) {
1991 schedule_timeout_uninterruptible(1); 2001 mutex_unlock(&root->log_mutex);
1992 mutex_lock(&root->log_mutex); 2002 schedule_timeout_uninterruptible(1);
1993 2003 mutex_lock(&root->log_mutex);
2004 }
1994 wait_for_writer(trans, root); 2005 wait_for_writer(trans, root);
1995 if (batch == root->log_batch) 2006 if (batch == root->log_batch)
1996 break; 2007 break;
@@ -2003,14 +2014,19 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2003 goto out; 2014 goto out;
2004 } 2015 }
2005 2016
2006 ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages); 2017 /* we start IO on all the marked extents here, but we don't actually
2018 * wait for them until later.
2019 */
2020 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages);
2007 BUG_ON(ret); 2021 BUG_ON(ret);
2008 2022
2009 btrfs_set_root_node(&log->root_item, log->node); 2023 btrfs_set_root_node(&log->root_item, log->node);
2010 2024
2011 root->log_batch = 0; 2025 root->log_batch = 0;
2026 log_transid = root->log_transid;
2012 root->log_transid++; 2027 root->log_transid++;
2013 log->log_transid = root->log_transid; 2028 log->log_transid = root->log_transid;
2029 root->log_start_pid = 0;
2014 smp_mb(); 2030 smp_mb();
2015 /* 2031 /*
2016 * log tree has been flushed to disk, new modifications of 2032 * log tree has been flushed to disk, new modifications of
@@ -2036,6 +2052,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2036 2052
2037 index2 = log_root_tree->log_transid % 2; 2053 index2 = log_root_tree->log_transid % 2;
2038 if (atomic_read(&log_root_tree->log_commit[index2])) { 2054 if (atomic_read(&log_root_tree->log_commit[index2])) {
2055 btrfs_wait_marked_extents(log, &log->dirty_log_pages);
2039 wait_log_commit(trans, log_root_tree, 2056 wait_log_commit(trans, log_root_tree,
2040 log_root_tree->log_transid); 2057 log_root_tree->log_transid);
2041 mutex_unlock(&log_root_tree->log_mutex); 2058 mutex_unlock(&log_root_tree->log_mutex);
@@ -2055,6 +2072,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2055 * check the full commit flag again 2072 * check the full commit flag again
2056 */ 2073 */
2057 if (root->fs_info->last_trans_log_full_commit == trans->transid) { 2074 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2075 btrfs_wait_marked_extents(log, &log->dirty_log_pages);
2058 mutex_unlock(&log_root_tree->log_mutex); 2076 mutex_unlock(&log_root_tree->log_mutex);
2059 ret = -EAGAIN; 2077 ret = -EAGAIN;
2060 goto out_wake_log_root; 2078 goto out_wake_log_root;
@@ -2063,6 +2081,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2063 ret = btrfs_write_and_wait_marked_extents(log_root_tree, 2081 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
2064 &log_root_tree->dirty_log_pages); 2082 &log_root_tree->dirty_log_pages);
2065 BUG_ON(ret); 2083 BUG_ON(ret);
2084 btrfs_wait_marked_extents(log, &log->dirty_log_pages);
2066 2085
2067 btrfs_set_super_log_root(&root->fs_info->super_for_commit, 2086 btrfs_set_super_log_root(&root->fs_info->super_for_commit,
2068 log_root_tree->node->start); 2087 log_root_tree->node->start);
@@ -2082,9 +2101,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2082 * the running transaction open, so a full commit can't hop 2101 * the running transaction open, so a full commit can't hop
2083 * in and cause problems either. 2102 * in and cause problems either.
2084 */ 2103 */
2085 write_ctree_super(trans, root->fs_info->tree_root, 2); 2104 write_ctree_super(trans, root->fs_info->tree_root, 1);
2086 ret = 0; 2105 ret = 0;
2087 2106
2107 mutex_lock(&root->log_mutex);
2108 if (root->last_log_commit < log_transid)
2109 root->last_log_commit = log_transid;
2110 mutex_unlock(&root->log_mutex);
2111
2088out_wake_log_root: 2112out_wake_log_root:
2089 atomic_set(&log_root_tree->log_commit[index2], 0); 2113 atomic_set(&log_root_tree->log_commit[index2], 0);
2090 smp_mb(); 2114 smp_mb();
@@ -2852,6 +2876,21 @@ out:
2852 return ret; 2876 return ret;
2853} 2877}
2854 2878
2879static int inode_in_log(struct btrfs_trans_handle *trans,
2880 struct inode *inode)
2881{
2882 struct btrfs_root *root = BTRFS_I(inode)->root;
2883 int ret = 0;
2884
2885 mutex_lock(&root->log_mutex);
2886 if (BTRFS_I(inode)->logged_trans == trans->transid &&
2887 BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
2888 ret = 1;
2889 mutex_unlock(&root->log_mutex);
2890 return ret;
2891}
2892
2893
2855/* 2894/*
2856 * helper function around btrfs_log_inode to make sure newly created 2895 * helper function around btrfs_log_inode to make sure newly created
2857 * parent directories also end up in the log. A minimal inode and backref 2896 * parent directories also end up in the log. A minimal inode and backref
@@ -2891,6 +2930,11 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
2891 if (ret) 2930 if (ret)
2892 goto end_no_trans; 2931 goto end_no_trans;
2893 2932
2933 if (inode_in_log(trans, inode)) {
2934 ret = BTRFS_NO_LOG_SYNC;
2935 goto end_no_trans;
2936 }
2937
2894 start_log_trans(trans, root); 2938 start_log_trans(trans, root);
2895 2939
2896 ret = btrfs_log_inode(trans, root, inode, inode_only); 2940 ret = btrfs_log_inode(trans, root, inode, inode_only);
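
The new inode_in_log() fast path lets an fsync skip the tree log entirely when the inode's last change was logged in the current transaction and that log sub-transaction has already been committed. A standalone rendition of the check, with the locking elided and a demo struct standing in for btrfs_inode:

    #include <stdio.h>

    struct demo_inode {
        unsigned long long logged_trans;   /* transid it was last logged in */
        unsigned long long last_sub_trans; /* log sub-transaction of change */
    };

    static int inode_in_log(const struct demo_inode *inode,
                            unsigned long long transid,
                            unsigned long long last_log_commit)
    {
        return inode->logged_trans == transid &&
               inode->last_sub_trans <= last_log_commit;
    }

    int main(void)
    {
        struct demo_inode inode = { .logged_trans = 7, .last_sub_trans = 3 };

        printf("in log: %d\n", inode_in_log(&inode, 7, 5)); /* 1: skip sync */
        printf("in log: %d\n", inode_in_log(&inode, 7, 2)); /* 0: must log  */
        return 0;
    }
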
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index d09c7609e16b..0776eacb5083 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -19,6 +19,9 @@
19#ifndef __TREE_LOG_ 19#ifndef __TREE_LOG_
20#define __TREE_LOG_ 20#define __TREE_LOG_
21 21
22/* return value for btrfs_log_dentry_safe that means we don't need to log it at all */
23#define BTRFS_NO_LOG_SYNC 256
24
22int btrfs_sync_log(struct btrfs_trans_handle *trans, 25int btrfs_sync_log(struct btrfs_trans_handle *trans,
23 struct btrfs_root *root); 26 struct btrfs_root *root);
24int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root); 27int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index b0fc93f95fd0..b6dd5967c48a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -260,7 +260,7 @@ err:
260 * attributes are handled directly. 260 * attributes are handled directly.
261 */ 261 */
262struct xattr_handler *btrfs_xattr_handlers[] = { 262struct xattr_handler *btrfs_xattr_handlers[] = {
263#ifdef CONFIG_BTRFS_POSIX_ACL 263#ifdef CONFIG_BTRFS_FS_POSIX_ACL
264 &btrfs_xattr_acl_access_handler, 264 &btrfs_xattr_acl_access_handler,
265 &btrfs_xattr_acl_default_handler, 265 &btrfs_xattr_acl_default_handler,
266#endif 266#endif
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 43003e0bef18..b09098079916 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1577,7 +1577,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1577 1577
1578out_err: 1578out_err:
1579 if (tcp_ses) { 1579 if (tcp_ses) {
1580 kfree(tcp_ses->hostname); 1580 if (!IS_ERR(tcp_ses->hostname))
1581 kfree(tcp_ses->hostname);
1581 if (tcp_ses->ssocket) 1582 if (tcp_ses->ssocket)
1582 sock_release(tcp_ses->ssocket); 1583 sock_release(tcp_ses->ssocket);
1583 kfree(tcp_ses); 1584 kfree(tcp_ses);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 240cef14fe58..70736eb4b516 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -316,6 +316,10 @@ int dlm_lowcomms_connect_node(int nodeid)
316{ 316{
317 struct connection *con; 317 struct connection *con;
318 318
319 /* with sctp there's no connecting without sending */
320 if (dlm_config.ci_protocol != 0)
321 return 0;
322
319 if (nodeid == dlm_our_nodeid()) 323 if (nodeid == dlm_our_nodeid())
320 return 0; 324 return 0;
321 325
@@ -455,9 +459,9 @@ static void process_sctp_notification(struct connection *con,
455 int prim_len, ret; 459 int prim_len, ret;
456 int addr_len; 460 int addr_len;
457 struct connection *new_con; 461 struct connection *new_con;
458 struct file *file;
459 sctp_peeloff_arg_t parg; 462 sctp_peeloff_arg_t parg;
460 int parglen = sizeof(parg); 463 int parglen = sizeof(parg);
464 int err;
461 465
462 /* 466 /*
463 * We get this before any data for an association. 467 * We get this before any data for an association.
@@ -512,19 +516,22 @@ static void process_sctp_notification(struct connection *con,
512 ret = kernel_getsockopt(con->sock, IPPROTO_SCTP, 516 ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
513 SCTP_SOCKOPT_PEELOFF, 517 SCTP_SOCKOPT_PEELOFF,
514 (void *)&parg, &parglen); 518 (void *)&parg, &parglen);
515 if (ret) { 519 if (ret < 0) {
516 log_print("Can't peel off a socket for " 520 log_print("Can't peel off a socket for "
517 "connection %d to node %d: err=%d\n", 521 "connection %d to node %d: err=%d",
518 parg.associd, nodeid, ret); 522 parg.associd, nodeid, ret);
523 return;
524 }
525 new_con->sock = sockfd_lookup(parg.sd, &err);
526 if (!new_con->sock) {
527 log_print("sockfd_lookup error %d", err);
528 return;
519 } 529 }
520 file = fget(parg.sd);
521 new_con->sock = SOCKET_I(file->f_dentry->d_inode);
522 add_sock(new_con->sock, new_con); 530 add_sock(new_con->sock, new_con);
523 fput(file); 531 sockfd_put(new_con->sock);
524 put_unused_fd(parg.sd);
525 532
526 log_print("got new/restarted association %d nodeid %d", 533 log_print("connecting to %d sctp association %d",
527 (int)sn->sn_assoc_change.sac_assoc_id, nodeid); 534 nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
528 535
529 /* Send any pending writes */ 536 /* Send any pending writes */
530 clear_bit(CF_CONNECT_PENDING, &new_con->flags); 537 clear_bit(CF_CONNECT_PENDING, &new_con->flags);
@@ -837,8 +844,6 @@ static void sctp_init_assoc(struct connection *con)
837 if (con->retries++ > MAX_CONNECT_RETRIES) 844 if (con->retries++ > MAX_CONNECT_RETRIES)
838 return; 845 return;
839 846
840 log_print("Initiating association with node %d", con->nodeid);
841
842 if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) { 847 if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) {
843 log_print("no address for nodeid %d", con->nodeid); 848 log_print("no address for nodeid %d", con->nodeid);
844 return; 849 return;
@@ -855,11 +860,14 @@ static void sctp_init_assoc(struct connection *con)
855 outmessage.msg_flags = MSG_EOR; 860 outmessage.msg_flags = MSG_EOR;
856 861
857 spin_lock(&con->writequeue_lock); 862 spin_lock(&con->writequeue_lock);
858 e = list_entry(con->writequeue.next, struct writequeue_entry,
859 list);
860 863
861 BUG_ON((struct list_head *) e == &con->writequeue); 864 if (list_empty(&con->writequeue)) {
865 spin_unlock(&con->writequeue_lock);
866 log_print("writequeue empty for nodeid %d", con->nodeid);
867 return;
868 }
862 869
870 e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
863 len = e->len; 871 len = e->len;
864 offset = e->offset; 872 offset = e->offset;
865 spin_unlock(&con->writequeue_lock); 873 spin_unlock(&con->writequeue_lock);
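
Two things changed in the peeloff path above: failures now bail out instead of logging and falling through to use an invalid descriptor, and the fget()/SOCKET_I() pair is replaced by sockfd_lookup()/sockfd_put(), which also verifies the fd really refers to a socket. A userspace-shaped sketch of the corrected control flow (peeloff() here is a stand-in, not the SCTP API):

    #include <stdio.h>

    static int peeloff(int assoc_id) { return assoc_id > 0 ? 3 : -1; }

    static void handle_association(int assoc_id)
    {
        int sd = peeloff(assoc_id);
        if (sd < 0) {
            fprintf(stderr, "can't peel off association %d\n", assoc_id);
            return;         /* bail out instead of falling through */
        }
        printf("got socket fd %d for association %d\n", sd, assoc_id);
    }

    int main(void)
    {
        handle_association(42);
        handle_association(-1);
        return 0;
    }
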
diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig
index 8aadb99b7634..1cd6d9d3e29a 100644
--- a/fs/ecryptfs/Kconfig
+++ b/fs/ecryptfs/Kconfig
@@ -1,8 +1,9 @@
1config ECRYPT_FS 1config ECRYPT_FS
2 tristate "eCrypt filesystem layer support (EXPERIMENTAL)" 2 tristate "eCrypt filesystem layer support (EXPERIMENTAL)"
3 depends on EXPERIMENTAL && KEYS && NET 3 depends on EXPERIMENTAL && KEYS && CRYPTO
4 select CRYPTO_ECB 4 select CRYPTO_ECB
5 select CRYPTO_CBC 5 select CRYPTO_CBC
6 select CRYPTO_MD5
6 help 7 help
7 Encrypted filesystem that operates on the VFS layer. See 8 Encrypted filesystem that operates on the VFS layer. See
8 <file:Documentation/filesystems/ecryptfs.txt> to learn more about 9 <file:Documentation/filesystems/ecryptfs.txt> to learn more about
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 101fe4c7b1ee..c6ac85d6c701 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -35,6 +35,7 @@
35#include <linux/key.h> 35#include <linux/key.h>
36#include <linux/parser.h> 36#include <linux/parser.h>
37#include <linux/fs_stack.h> 37#include <linux/fs_stack.h>
38#include <linux/ima.h>
38#include "ecryptfs_kernel.h" 39#include "ecryptfs_kernel.h"
39 40
40/** 41/**
@@ -118,6 +119,7 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
118 const struct cred *cred = current_cred(); 119 const struct cred *cred = current_cred();
119 struct ecryptfs_inode_info *inode_info = 120 struct ecryptfs_inode_info *inode_info =
120 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); 121 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
122 int opened_lower_file = 0;
121 int rc = 0; 123 int rc = 0;
122 124
123 mutex_lock(&inode_info->lower_file_mutex); 125 mutex_lock(&inode_info->lower_file_mutex);
@@ -134,9 +136,12 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
134 "for lower_dentry [0x%p] and lower_mnt [0x%p]; " 136 "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
135 "rc = [%d]\n", lower_dentry, lower_mnt, rc); 137 "rc = [%d]\n", lower_dentry, lower_mnt, rc);
136 inode_info->lower_file = NULL; 138 inode_info->lower_file = NULL;
137 } 139 } else
140 opened_lower_file = 1;
138 } 141 }
139 mutex_unlock(&inode_info->lower_file_mutex); 142 mutex_unlock(&inode_info->lower_file_mutex);
143 if (opened_lower_file)
144 ima_counts_get(inode_info->lower_file);
140 return rc; 145 return rc;
141} 146}
142 147
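
The ecryptfs hunk records opened_lower_file under lower_file_mutex and calls ima_counts_get() only after the unlock, keeping the external call out of the critical section. Generic shape of that defer-past-the-lock pattern (pthread stand-ins, build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void expensive_side_effect(void) { puts("side effect, lock-free"); }

    int main(void)
    {
        int opened = 0;

        pthread_mutex_lock(&lock);
        opened = 1;                 /* decide under the lock */
        pthread_mutex_unlock(&lock);

        if (opened)                 /* act outside the critical section */
            expensive_side_effect();
        return 0;
    }
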
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 72743d360509..7a520a862f49 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2321,7 +2321,18 @@ static int ext3_commit_super(struct super_block *sb,
2321 2321
2322 if (!sbh) 2322 if (!sbh)
2323 return error; 2323 return error;
2324 es->s_wtime = cpu_to_le32(get_seconds()); 2324 /*
2325 * If the file system is mounted read-only, don't update the
2326 * superblock write time. This avoids updating the superblock
2327 * write time when we are mounting the root file system
2328 * read/only but we need to replay the journal; at that point,
2329 * for people who are east of GMT and who make their clock
2330 * tick in localtime for Windows bug-for-bug compatibility,
2331 * the clock is set in the future, and this will cause e2fsck
2332 * to complain and force a full file system check.
2333 */
2334 if (!(sb->s_flags & MS_RDONLY))
2335 es->s_wtime = cpu_to_le32(get_seconds());
2325 es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb)); 2336 es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
2326 es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb)); 2337 es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
2327 BUFFER_TRACE(sbh, "marking dirty"); 2338 BUFFER_TRACE(sbh, "marking dirty");
diff --git a/fs/file.c b/fs/file.c
index f313314f996f..87e129030ab1 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -10,6 +10,7 @@
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/sched.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
15#include <linux/file.h> 16#include <linux/file.h>
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 9b9d6395bad3..052f214ea6f0 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -58,6 +58,11 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
58 } 58 }
59 unlock_new_inode(tree->inode); 59 unlock_new_inode(tree->inode);
60 60
61 if (!HFS_I(tree->inode)->first_blocks) {
62 printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
63 goto free_inode;
64 }
65
61 mapping = tree->inode->i_mapping; 66 mapping = tree->inode->i_mapping;
62 page = read_mapping_page(mapping, 0, NULL); 67 page = read_mapping_page(mapping, 0, NULL);
63 if (IS_ERR(page)) 68 if (IS_ERR(page))
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 175d08eacc86..bed78ac8f6d1 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -99,6 +99,10 @@ int hfsplus_read_wrapper(struct super_block *sb)
99 99
100 if (hfsplus_get_last_session(sb, &part_start, &part_size)) 100 if (hfsplus_get_last_session(sb, &part_start, &part_size))
101 return -EINVAL; 101 return -EINVAL;
102 if ((u64)part_start + part_size > 0x100000000ULL) {
103 pr_err("hfs: volumes larger than 2TB are not supported yet\n");
104 return -EINVAL;
105 }
102 while (1) { 106 while (1) {
103 bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr); 107 bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
104 if (!bh) 108 if (!bh)
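
The new hfsplus guard rejects any volume whose 512-byte sectors cannot be addressed in 32 bits: 2^32 sectors x 512 bytes = 2 TiB, hence the "larger than 2TB" message. Quick arithmetic check:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long sectors = 0x100000000ULL;    /* 2^32 */
        unsigned long long bytes = sectors * 512;

        printf("%llu sectors * 512 = %llu bytes (%llu TiB)\n",
               sectors, bytes, bytes >> 40);
        return 0;
    }
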
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 63976c0ccc25..99ea196f071f 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1180,7 +1180,7 @@ static int nfs4_init_client(struct nfs_client *clp,
1180 1, flags & NFS_MOUNT_NORESVPORT); 1180 1, flags & NFS_MOUNT_NORESVPORT);
1181 if (error < 0) 1181 if (error < 0)
1182 goto error; 1182 goto error;
1183 memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); 1183 strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
1184 1184
1185 error = nfs_idmap_new(clp); 1185 error = nfs_idmap_new(clp);
1186 if (error < 0) { 1186 if (error < 0) {
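
The client.c fix matters because memcpy(dst, src, sizeof(dst)) reads sizeof(dst) bytes from src even when the string is shorter, potentially running off the end of the source buffer, while strlcpy() stops at the NUL and always terminates. strlcpy() is a kernel helper, so this sketch carries a tiny equivalent for illustration:

    #include <stdio.h>
    #include <string.h>

    static size_t bounded_copy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';          /* always NUL-terminated */
        }
        return len;
    }

    int main(void)
    {
        char ipaddr[16];

        bounded_copy(ipaddr, "10.0.0.1", sizeof(ipaddr));
        printf("%s\n", ipaddr);
        return 0;
    }
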
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 32062c33c859..7cb298525eef 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1536,6 +1536,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1536 old_dentry->d_parent->d_name.name, old_dentry->d_name.name, 1536 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
1537 dentry->d_parent->d_name.name, dentry->d_name.name); 1537 dentry->d_parent->d_name.name, dentry->d_name.name);
1538 1538
1539 nfs_inode_return_delegation(inode);
1540
1539 d_drop(dentry); 1541 d_drop(dentry);
1540 error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name); 1542 error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
1541 if (error == 0) { 1543 if (error == 0) {
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 6c3210099d51..e1d415e97849 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -457,6 +457,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
457 }; 457 };
458 struct rpc_task_setup task_setup_data = { 458 struct rpc_task_setup task_setup_data = {
459 .rpc_client = NFS_CLIENT(inode), 459 .rpc_client = NFS_CLIENT(inode),
460 .rpc_message = &msg,
460 .callback_ops = &nfs_write_direct_ops, 461 .callback_ops = &nfs_write_direct_ops,
461 .workqueue = nfsiod_workqueue, 462 .workqueue = nfsiod_workqueue,
462 .flags = RPC_TASK_ASYNC, 463 .flags = RPC_TASK_ASYNC,
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 2636c26d56fa..fa3408f20112 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -121,7 +121,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
121 121
122 mnt_path = nfs4_pathname_string(&location->rootpath, page2, PAGE_SIZE); 122 mnt_path = nfs4_pathname_string(&location->rootpath, page2, PAGE_SIZE);
123 if (IS_ERR(mnt_path)) 123 if (IS_ERR(mnt_path))
124 return mnt; 124 return ERR_CAST(mnt_path);
125 mountdata->mnt_path = mnt_path; 125 mountdata->mnt_path = mnt_path;
126 maxbuflen = mnt_path - 1 - page2; 126 maxbuflen = mnt_path - 1 - page2;
127 127
@@ -132,15 +132,15 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
132 if (buf->len <= 0 || buf->len >= maxbuflen) 132 if (buf->len <= 0 || buf->len >= maxbuflen)
133 continue; 133 continue;
134 134
135 mountdata->addr = (struct sockaddr *)&addr;
136
137 if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len)) 135 if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len))
138 continue; 136 continue;
139 mountdata->addrlen = nfs_parse_server_name(buf->data, 137
140 buf->len, 138 mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
141 mountdata->addr, mountdata->addrlen); 139 (struct sockaddr *)&addr, sizeof(addr));
142 if (mountdata->addrlen == 0) 140 if (mountdata->addrlen == 0)
143 continue; 141 continue;
142
143 mountdata->addr = (struct sockaddr *)&addr;
144 rpc_set_port(mountdata->addr, NFS_PORT); 144 rpc_set_port(mountdata->addr, NFS_PORT);
145 145
146 memcpy(page2, buf->data, buf->len); 146 memcpy(page2, buf->data, buf->len);
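
Returning ERR_CAST(mnt_path) instead of the unset mnt follows the kernel's ERR_PTR convention: an errno encoded in a pointer must be propagated, not swallowed. A minimal userspace rendition of the convention (these helpers mirror the kernel ones in spirit, not verbatim):

    #include <stdio.h>

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-4095;
    }

    static char *get_path(int fail) { return fail ? ERR_PTR(-12) : "path"; }

    static void *try_location(int fail)
    {
        char *p = get_path(fail);
        if (IS_ERR(p))
            return ERR_PTR(PTR_ERR(p)); /* the equivalent of ERR_CAST() */
        return p;
    }

    int main(void)
    {
        void *v = try_location(1);
        if (IS_ERR(v))
            printf("propagated errno %ld\n", PTR_ERR(v));
        return 0;
    }
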
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ed7c269e2514..ff37454fa783 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -72,12 +72,17 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
72/* Prevent leaks of NFSv4 errors into userland */ 72/* Prevent leaks of NFSv4 errors into userland */
73static int nfs4_map_errors(int err) 73static int nfs4_map_errors(int err)
74{ 74{
75 if (err < -1000) { 75 if (err >= -1000)
76 return err;
77 switch (err) {
78 case -NFS4ERR_RESOURCE:
79 return -EREMOTEIO;
80 default:
76 dprintk("%s could not handle NFSv4 error %d\n", 81 dprintk("%s could not handle NFSv4 error %d\n",
77 __func__, -err); 82 __func__, -err);
78 return -EIO; 83 break;
79 } 84 }
80 return err; 85 return -EIO;
81} 86}
82 87
83/* 88/*
@@ -3060,9 +3065,6 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)
3060 if (time_before(clp->cl_last_renewal,timestamp)) 3065 if (time_before(clp->cl_last_renewal,timestamp))
3061 clp->cl_last_renewal = timestamp; 3066 clp->cl_last_renewal = timestamp;
3062 spin_unlock(&clp->cl_lock); 3067 spin_unlock(&clp->cl_lock);
3063 dprintk("%s calling put_rpccred on rpc_cred %p\n", __func__,
3064 task->tk_msg.rpc_cred);
3065 put_rpccred(task->tk_msg.rpc_cred);
3066} 3068}
3067 3069
3068static const struct rpc_call_ops nfs4_renew_ops = { 3070static const struct rpc_call_ops nfs4_renew_ops = {
@@ -4877,7 +4879,6 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
4877 nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp); 4879 nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
4878 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 4880 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
4879 4881
4880 put_rpccred(task->tk_msg.rpc_cred);
4881 kfree(task->tk_msg.rpc_argp); 4882 kfree(task->tk_msg.rpc_argp);
4882 kfree(task->tk_msg.rpc_resp); 4883 kfree(task->tk_msg.rpc_resp);
4883 4884
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index e27c6cef18f2..0156c01c212c 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -127,12 +127,6 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
127} 127}
128 128
129void 129void
130nfs4_renewd_prepare_shutdown(struct nfs_server *server)
131{
132 cancel_delayed_work(&server->nfs_client->cl_renewd);
133}
134
135void
136nfs4_kill_renewd(struct nfs_client *clp) 130nfs4_kill_renewd(struct nfs_client *clp)
137{ 131{
138 cancel_delayed_work_sync(&clp->cl_renewd); 132 cancel_delayed_work_sync(&clp->cl_renewd);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 83ad47cbdd8a..20b4e30e6c82 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5681,7 +5681,6 @@ static struct {
5681 { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, 5681 { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
5682 { NFS4ERR_BADTYPE, -EBADTYPE }, 5682 { NFS4ERR_BADTYPE, -EBADTYPE },
5683 { NFS4ERR_LOCKED, -EAGAIN }, 5683 { NFS4ERR_LOCKED, -EAGAIN },
5684 { NFS4ERR_RESOURCE, -EREMOTEIO },
5685 { NFS4ERR_SYMLINK, -ELOOP }, 5684 { NFS4ERR_SYMLINK, -ELOOP },
5686 { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, 5685 { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
5687 { NFS4ERR_DEADLOCK, -EDEADLK }, 5686 { NFS4ERR_DEADLOCK, -EDEADLK },
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 29786d3b9326..90be551b80c1 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -728,22 +728,24 @@ static void nfs_umount_begin(struct super_block *sb)
728 unlock_kernel(); 728 unlock_kernel();
729} 729}
730 730
731static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(int flags) 731static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
732{ 732{
733 struct nfs_parsed_mount_data *data; 733 struct nfs_parsed_mount_data *data;
734 734
735 data = kzalloc(sizeof(*data), GFP_KERNEL); 735 data = kzalloc(sizeof(*data), GFP_KERNEL);
736 if (data) { 736 if (data) {
737 data->flags = flags;
738 data->rsize = NFS_MAX_FILE_IO_SIZE; 737 data->rsize = NFS_MAX_FILE_IO_SIZE;
739 data->wsize = NFS_MAX_FILE_IO_SIZE; 738 data->wsize = NFS_MAX_FILE_IO_SIZE;
740 data->acregmin = NFS_DEF_ACREGMIN; 739 data->acregmin = NFS_DEF_ACREGMIN;
741 data->acregmax = NFS_DEF_ACREGMAX; 740 data->acregmax = NFS_DEF_ACREGMAX;
742 data->acdirmin = NFS_DEF_ACDIRMIN; 741 data->acdirmin = NFS_DEF_ACDIRMIN;
743 data->acdirmax = NFS_DEF_ACDIRMAX; 742 data->acdirmax = NFS_DEF_ACDIRMAX;
743 data->mount_server.port = NFS_UNSPEC_PORT;
744 data->nfs_server.port = NFS_UNSPEC_PORT; 744 data->nfs_server.port = NFS_UNSPEC_PORT;
745 data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
745 data->auth_flavors[0] = RPC_AUTH_UNIX; 746 data->auth_flavors[0] = RPC_AUTH_UNIX;
746 data->auth_flavor_len = 1; 747 data->auth_flavor_len = 1;
748 data->version = version;
747 data->minorversion = 0; 749 data->minorversion = 0;
748 } 750 }
749 return data; 751 return data;
@@ -776,15 +778,13 @@ static int nfs_verify_server_address(struct sockaddr *addr)
776 * Select between a default port value and a user-specified port value. 778 * Select between a default port value and a user-specified port value.
777 * If a zero value is set, then autobind will be used. 779 * If a zero value is set, then autobind will be used.
778 */ 780 */
779static void nfs_set_default_port(struct sockaddr *sap, const int parsed_port, 781static void nfs_set_port(struct sockaddr *sap, int *port,
780 const unsigned short default_port) 782 const unsigned short default_port)
781{ 783{
782 unsigned short port = default_port; 784 if (*port == NFS_UNSPEC_PORT)
785 *port = default_port;
783 786
784 if (parsed_port != NFS_UNSPEC_PORT) 787 rpc_set_port(sap, *port);
785 port = parsed_port;
786
787 rpc_set_port(sap, port);
788} 788}
789 789
790/* 790/*
@@ -1253,6 +1253,7 @@ static int nfs_parse_mount_options(char *raw,
1253 default: 1253 default:
1254 dfprintk(MOUNT, "NFS: unrecognized " 1254 dfprintk(MOUNT, "NFS: unrecognized "
1255 "transport protocol\n"); 1255 "transport protocol\n");
1256 kfree(string);
1256 return 0; 1257 return 0;
1257 } 1258 }
1258 break; 1259 break;
@@ -1475,7 +1476,7 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
1475 args->mount_server.addrlen = args->nfs_server.addrlen; 1476 args->mount_server.addrlen = args->nfs_server.addrlen;
1476 } 1477 }
1477 request.salen = args->mount_server.addrlen; 1478 request.salen = args->mount_server.addrlen;
1478 nfs_set_default_port(request.sap, args->mount_server.port, 0); 1479 nfs_set_port(request.sap, &args->mount_server.port, 0);
1479 1480
1480 /* 1481 /*
1481 * Now ask the mount server to map our export path 1482 * Now ask the mount server to map our export path
@@ -1711,8 +1712,6 @@ static int nfs_validate_mount_data(void *options,
1711 1712
1712 if (!(data->flags & NFS_MOUNT_TCP)) 1713 if (!(data->flags & NFS_MOUNT_TCP))
1713 args->nfs_server.protocol = XPRT_TRANSPORT_UDP; 1714 args->nfs_server.protocol = XPRT_TRANSPORT_UDP;
1714 else
1715 args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
1716 /* N.B. caller will free nfs_server.hostname in all cases */ 1715 /* N.B. caller will free nfs_server.hostname in all cases */
1717 args->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL); 1716 args->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
1718 args->namlen = data->namlen; 1717 args->namlen = data->namlen;
@@ -1767,7 +1766,7 @@ static int nfs_validate_mount_data(void *options,
1767 goto out_v4_not_compiled; 1766 goto out_v4_not_compiled;
1768#endif 1767#endif
1769 1768
1770 nfs_set_default_port(sap, args->nfs_server.port, 0); 1769 nfs_set_port(sap, &args->nfs_server.port, 0);
1771 1770
1772 nfs_set_mount_transport_protocol(args); 1771 nfs_set_mount_transport_protocol(args);
1773 1772
@@ -1848,9 +1847,10 @@ nfs_compare_remount_data(struct nfs_server *nfss,
1848 data->acdirmin != nfss->acdirmin / HZ || 1847 data->acdirmin != nfss->acdirmin / HZ ||
1849 data->acdirmax != nfss->acdirmax / HZ || 1848 data->acdirmax != nfss->acdirmax / HZ ||
1850 data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) || 1849 data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
1850 data->nfs_server.port != nfss->port ||
1851 data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen || 1851 data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
1852 memcmp(&data->nfs_server.address, &nfss->nfs_client->cl_addr, 1852 !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
1853 data->nfs_server.addrlen) != 0) 1853 (struct sockaddr *)&nfss->nfs_client->cl_addr))
1854 return -EINVAL; 1854 return -EINVAL;
1855 1855
1856 return 0; 1856 return 0;
@@ -1893,6 +1893,7 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
1893 data->acdirmin = nfss->acdirmin / HZ; 1893 data->acdirmin = nfss->acdirmin / HZ;
1894 data->acdirmax = nfss->acdirmax / HZ; 1894 data->acdirmax = nfss->acdirmax / HZ;
1895 data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ; 1895 data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
1896 data->nfs_server.port = nfss->port;
1896 data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen; 1897 data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
1897 memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr, 1898 memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
1898 data->nfs_server.addrlen); 1899 data->nfs_server.addrlen);
@@ -2106,7 +2107,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
2106 }; 2107 };
2107 int error = -ENOMEM; 2108 int error = -ENOMEM;
2108 2109
2109 data = nfs_alloc_parsed_mount_data(NFS_MOUNT_VER3 | NFS_MOUNT_TCP); 2110 data = nfs_alloc_parsed_mount_data(3);
2110 mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL); 2111 mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
2111 if (data == NULL || mntfh == NULL) 2112 if (data == NULL || mntfh == NULL)
2112 goto out_free_fh; 2113 goto out_free_fh;
@@ -2331,7 +2332,7 @@ static int nfs4_validate_text_mount_data(void *options,
2331{ 2332{
2332 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address; 2333 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
2333 2334
2334 nfs_set_default_port(sap, args->nfs_server.port, NFS_PORT); 2335 nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
2335 2336
2336 nfs_validate_transport_protocol(args); 2337 nfs_validate_transport_protocol(args);
2337 2338
@@ -2376,7 +2377,6 @@ static int nfs4_validate_mount_data(void *options,
2376 if (data == NULL) 2377 if (data == NULL)
2377 goto out_no_data; 2378 goto out_no_data;
2378 2379
2379 args->version = 4;
2380 switch (data->version) { 2380 switch (data->version) {
2381 case 1: 2381 case 1:
2382 if (data->host_addrlen > sizeof(args->nfs_server.address)) 2382 if (data->host_addrlen > sizeof(args->nfs_server.address))
@@ -2660,7 +2660,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
2660 struct nfs_parsed_mount_data *data; 2660 struct nfs_parsed_mount_data *data;
2661 int error = -ENOMEM; 2661 int error = -ENOMEM;
2662 2662
2663 data = nfs_alloc_parsed_mount_data(0); 2663 data = nfs_alloc_parsed_mount_data(4);
2664 if (data == NULL) 2664 if (data == NULL)
2665 goto out_free_data; 2665 goto out_free_data;
2666 2666
@@ -2690,7 +2690,6 @@ static void nfs4_kill_super(struct super_block *sb)
2690 dprintk("--> %s\n", __func__); 2690 dprintk("--> %s\n", __func__);
2691 nfs_super_return_all_delegations(sb); 2691 nfs_super_return_all_delegations(sb);
2692 kill_anon_super(sb); 2692 kill_anon_super(sb);
2693 nfs4_renewd_prepare_shutdown(server);
2694 nfs_fscache_release_super_cookie(sb); 2693 nfs_fscache_release_super_cookie(sb);
2695 nfs_free_server(server); 2694 nfs_free_server(server);
2696 dprintk("<-- %s\n", __func__); 2695 dprintk("<-- %s\n", __func__);
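Two related changes run through fs/nfs/super.c: nfs_alloc_parsed_mount_data() now takes the NFS version up front (3 in nfs_get_sb, 4 in nfs4_get_sb) and seeds the TCP transport and NFS_UNSPEC_PORT defaults, while nfs_set_port() resolves an unspecified port in place, so every later reader of the parsed port, including the new remount comparison, sees the effective value. A hedged sketch of the caller side, assuming the hunks above:

	struct nfs_parsed_mount_data *args = nfs_alloc_parsed_mount_data(4);

	if (args) {
		/* resolves NFS_UNSPEC_PORT to NFS_PORT and stores it back */
		nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
		/* args->nfs_server.port now matches what rpc_set_port() saw,
		 * so nfs_compare_remount_data() can compare it directly */
	}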
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 828a889be909..7e54e52964dd 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -91,6 +91,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
91 struct dnotify_struct *dn; 91 struct dnotify_struct *dn;
92 struct dnotify_struct **prev; 92 struct dnotify_struct **prev;
93 struct fown_struct *fown; 93 struct fown_struct *fown;
94 __u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD;
94 95
95 to_tell = event->to_tell; 96 to_tell = event->to_tell;
96 97
@@ -106,7 +107,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
106 spin_lock(&entry->lock); 107 spin_lock(&entry->lock);
107 prev = &dnentry->dn; 108 prev = &dnentry->dn;
108 while ((dn = *prev) != NULL) { 109 while ((dn = *prev) != NULL) {
109 if ((dn->dn_mask & event->mask) == 0) { 110 if ((dn->dn_mask & test_mask) == 0) {
110 prev = &dn->dn_next; 111 prev = &dn->dn_next;
111 continue; 112 continue;
112 } 113 }
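Events generated on a directory's child carry FS_EVENT_ON_CHILD in event->mask, and dnotify registrations can carry that bit as well, so comparing raw masks could match on the marker bit alone even when the watcher never asked for the event type. Stripping it first compares only actual event types. An illustrative standalone test (flag values copied from fsnotify for illustration only):

	#include <stdint.h>

	#define FS_MODIFY		0x00000002
	#define FS_EVENT_ON_CHILD	0x08000000	/* marker, not an event type */

	static int wants_event(uint32_t dn_mask, uint32_t event_mask)
	{
		/* drop the on-child marker before comparing event types */
		uint32_t test_mask = event_mask & ~FS_EVENT_ON_CHILD;

		return (dn_mask & test_mask) != 0;
	}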
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index c8a07c65482b..3165d85aada2 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -324,11 +324,11 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
324 spin_lock(&group->mark_lock); 324 spin_lock(&group->mark_lock);
325 spin_lock(&inode->i_lock); 325 spin_lock(&inode->i_lock);
326 326
327 entry->group = group;
328 entry->inode = inode;
329
330 lentry = fsnotify_find_mark_entry(group, inode); 327 lentry = fsnotify_find_mark_entry(group, inode);
331 if (!lentry) { 328 if (!lentry) {
329 entry->group = group;
330 entry->inode = inode;
331
332 hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries); 332 hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
333 list_add(&entry->g_list, &group->mark_entries); 333 list_add(&entry->g_list, &group->mark_entries);
334 334
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 3816d5750dd5..b8bf53b4c108 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -143,7 +143,7 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
143 /* remember, after old was put on the wait_q we aren't 143 /* remember, after old was put on the wait_q we aren't
144 * allowed to look at the inode any more, only thing 144 * allowed to look at the inode any more, only thing
145 * left to check was if the file_name is the same */ 145 * left to check was if the file_name is the same */
146 if (old->name_len && 146 if (!old->name_len ||
147 !strcmp(old->file_name, new->file_name)) 147 !strcmp(old->file_name, new->file_name))
148 return true; 148 return true;
149 break; 149 break;
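The merge test in event_compare() previously required old->name_len to be non-zero, so two otherwise-identical nameless events never compared equal and could not be coalesced; the fix treats "nameless" as a match and otherwise compares the strings. Reduced to a standalone predicate (a sketch, assuming the earlier fields of the two events already compared equal):

	#include <string.h>

	static int names_match(size_t old_len, const char *old_name,
			       const char *new_name)
	{
		/* nameless events compare equal; named ones must match */
		return !old_len || !strcmp(old_name, new_name);
	}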
diff --git a/fs/pipe.c b/fs/pipe.c
index 52c415114838..ae17d026aaa3 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -777,36 +777,55 @@ pipe_rdwr_release(struct inode *inode, struct file *filp)
777static int 777static int
778pipe_read_open(struct inode *inode, struct file *filp) 778pipe_read_open(struct inode *inode, struct file *filp)
779{ 779{
780 /* We could have perhaps used atomic_t, but this and friends 780 int ret = -ENOENT;
781 below are the only places. So it doesn't seem worthwhile. */ 781
782 mutex_lock(&inode->i_mutex); 782 mutex_lock(&inode->i_mutex);
783 inode->i_pipe->readers++; 783
784 if (inode->i_pipe) {
785 ret = 0;
786 inode->i_pipe->readers++;
787 }
788
784 mutex_unlock(&inode->i_mutex); 789 mutex_unlock(&inode->i_mutex);
785 790
786 return 0; 791 return ret;
787} 792}
788 793
789static int 794static int
790pipe_write_open(struct inode *inode, struct file *filp) 795pipe_write_open(struct inode *inode, struct file *filp)
791{ 796{
797 int ret = -ENOENT;
798
792 mutex_lock(&inode->i_mutex); 799 mutex_lock(&inode->i_mutex);
793 inode->i_pipe->writers++; 800
801 if (inode->i_pipe) {
802 ret = 0;
803 inode->i_pipe->writers++;
804 }
805
794 mutex_unlock(&inode->i_mutex); 806 mutex_unlock(&inode->i_mutex);
795 807
796 return 0; 808 return ret;
797} 809}
798 810
799static int 811static int
800pipe_rdwr_open(struct inode *inode, struct file *filp) 812pipe_rdwr_open(struct inode *inode, struct file *filp)
801{ 813{
814 int ret = -ENOENT;
815
802 mutex_lock(&inode->i_mutex); 816 mutex_lock(&inode->i_mutex);
803 if (filp->f_mode & FMODE_READ) 817
804 inode->i_pipe->readers++; 818 if (inode->i_pipe) {
805 if (filp->f_mode & FMODE_WRITE) 819 ret = 0;
806 inode->i_pipe->writers++; 820 if (filp->f_mode & FMODE_READ)
821 inode->i_pipe->readers++;
822 if (filp->f_mode & FMODE_WRITE)
823 inode->i_pipe->writers++;
824 }
825
807 mutex_unlock(&inode->i_mutex); 826 mutex_unlock(&inode->i_mutex);
808 827
809 return 0; 828 return ret;
810} 829}
811 830
812/* 831/*
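All three pipe open paths now share the same shape: take i_mutex, confirm inode->i_pipe is still present, and only then bump the reader/writer counts. A concurrent release can tear the pipe down between the VFS lookup and ->open, and returning -ENOENT makes the caller back out instead of dereferencing a freed pointer. Condensed into one illustrative function covering all three cases:

	static int pipe_any_open(struct inode *inode, struct file *filp)
	{
		int ret = -ENOENT;

		mutex_lock(&inode->i_mutex);
		if (inode->i_pipe) {		/* may be gone: racing release */
			ret = 0;
			if (filp->f_mode & FMODE_READ)
				inode->i_pipe->readers++;
			if (filp->f_mode & FMODE_WRITE)
				inode->i_pipe->writers++;
		}
		mutex_unlock(&inode->i_mutex);

		return ret;
	}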
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 56013371f9f3..a44a7897fd4d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -23,7 +23,6 @@
23#include <asm/io.h> 23#include <asm/io.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/mm.h>
27#include <linux/memory.h> 26#include <linux/memory.h>
28#include <asm/sections.h> 27#include <asm/sections.h>
29 28
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index c7bff4f603ff..a65239cfd97e 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -99,7 +99,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
99 "VmallocUsed: %8lu kB\n" 99 "VmallocUsed: %8lu kB\n"
100 "VmallocChunk: %8lu kB\n" 100 "VmallocChunk: %8lu kB\n"
101#ifdef CONFIG_MEMORY_FAILURE 101#ifdef CONFIG_MEMORY_FAILURE
102 "HardwareCorrupted: %8lu kB\n" 102 "HardwareCorrupted: %5lu kB\n"
103#endif 103#endif
104 , 104 ,
105 K(i.totalram), 105 K(i.totalram),
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 2281c2cbfe2b..5033ce0d254b 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -94,6 +94,7 @@ static const struct file_operations proc_kpagecount_operations = {
94#define KPF_COMPOUND_TAIL 16 94#define KPF_COMPOUND_TAIL 16
95#define KPF_HUGE 17 95#define KPF_HUGE 17
96#define KPF_UNEVICTABLE 18 96#define KPF_UNEVICTABLE 18
97#define KPF_HWPOISON 19
97#define KPF_NOPAGE 20 98#define KPF_NOPAGE 20
98 99
99#define KPF_KSM 21 100#define KPF_KSM 21
@@ -180,6 +181,10 @@ static u64 get_uflags(struct page *page)
180 u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); 181 u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
181 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); 182 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
182 183
184#ifdef CONFIG_MEMORY_FAILURE
185 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
186#endif
187
183#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 188#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
184 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); 189 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
185#endif 190#endif
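With KPF_HWPOISON exported, bit 19 of the per-pfn 64-bit word in /proc/kpageflags reports pages taken offline by the memory-failure code (always clear when CONFIG_MEMORY_FAILURE is off). A small userspace sketch, assuming a privileged reader and an arbitrary example pfn:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define KPF_HWPOISON	19

	int main(void)
	{
		uint64_t pfn = 0x1000;		/* illustrative pfn */
		uint64_t flags;
		int fd = open("/proc/kpageflags", O_RDONLY);

		/* one u64 of flag bits per page frame, indexed by pfn */
		if (fd < 0 || pread(fd, &flags, sizeof(flags),
				    (off_t)pfn * sizeof(flags)) != sizeof(flags))
			return 1;
		printf("pfn %#llx hwpoison=%d\n", (unsigned long long)pfn,
		       (int)((flags >> KPF_HWPOISON) & 1));
		close(fd);
		return 0;
	}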
diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
index b3208adf8e71..71e2b4d50a0a 100644
--- a/fs/romfs/storage.c
+++ b/fs/romfs/storage.c
@@ -253,11 +253,11 @@ ssize_t romfs_dev_strnlen(struct super_block *sb,
253 253
254#ifdef CONFIG_ROMFS_ON_MTD 254#ifdef CONFIG_ROMFS_ON_MTD
255 if (sb->s_mtd) 255 if (sb->s_mtd)
256 return romfs_mtd_strnlen(sb, pos, limit); 256 return romfs_mtd_strnlen(sb, pos, maxlen);
257#endif 257#endif
258#ifdef CONFIG_ROMFS_ON_BLOCK 258#ifdef CONFIG_ROMFS_ON_BLOCK
259 if (sb->s_bdev) 259 if (sb->s_bdev)
260 return romfs_blk_strnlen(sb, pos, limit); 260 return romfs_blk_strnlen(sb, pos, maxlen);
261#endif 261#endif
262 return -EIO; 262 return -EIO;
263} 263}
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 0050fc40e8c9..5fad489ce5bc 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -894,7 +894,8 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj)
894 894
895 mutex_lock(&sysfs_rename_mutex); 895 mutex_lock(&sysfs_rename_mutex);
896 BUG_ON(!sd->s_parent); 896 BUG_ON(!sd->s_parent);
897 new_parent_sd = new_parent_kobj->sd ? new_parent_kobj->sd : &sysfs_root; 897 new_parent_sd = (new_parent_kobj && new_parent_kobj->sd) ?
898 new_parent_kobj->sd : &sysfs_root;
898 899
899 error = 0; 900 error = 0;
900 if (sd->s_parent == new_parent_sd) 901 if (sd->s_parent == new_parent_sd)
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 561a9c050cef..f5ea4680f15f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -268,7 +268,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
268 struct sysfs_open_dirent *od, *new_od = NULL; 268 struct sysfs_open_dirent *od, *new_od = NULL;
269 269
270 retry: 270 retry:
271 spin_lock(&sysfs_open_dirent_lock); 271 spin_lock_irq(&sysfs_open_dirent_lock);
272 272
273 if (!sd->s_attr.open && new_od) { 273 if (!sd->s_attr.open && new_od) {
274 sd->s_attr.open = new_od; 274 sd->s_attr.open = new_od;
@@ -281,7 +281,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
281 list_add_tail(&buffer->list, &od->buffers); 281 list_add_tail(&buffer->list, &od->buffers);
282 } 282 }
283 283
284 spin_unlock(&sysfs_open_dirent_lock); 284 spin_unlock_irq(&sysfs_open_dirent_lock);
285 285
286 if (od) { 286 if (od) {
287 kfree(new_od); 287 kfree(new_od);
@@ -315,8 +315,9 @@ static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
315 struct sysfs_buffer *buffer) 315 struct sysfs_buffer *buffer)
316{ 316{
317 struct sysfs_open_dirent *od = sd->s_attr.open; 317 struct sysfs_open_dirent *od = sd->s_attr.open;
318 unsigned long flags;
318 319
319 spin_lock(&sysfs_open_dirent_lock); 320 spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
320 321
321 list_del(&buffer->list); 322 list_del(&buffer->list);
322 if (atomic_dec_and_test(&od->refcnt)) 323 if (atomic_dec_and_test(&od->refcnt))
@@ -324,7 +325,7 @@ static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
324 else 325 else
325 od = NULL; 326 od = NULL;
326 327
327 spin_unlock(&sysfs_open_dirent_lock); 328 spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
328 329
329 kfree(od); 330 kfree(od);
330} 331}
@@ -456,8 +457,9 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
456void sysfs_notify_dirent(struct sysfs_dirent *sd) 457void sysfs_notify_dirent(struct sysfs_dirent *sd)
457{ 458{
458 struct sysfs_open_dirent *od; 459 struct sysfs_open_dirent *od;
460 unsigned long flags;
459 461
460 spin_lock(&sysfs_open_dirent_lock); 462 spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
461 463
462 od = sd->s_attr.open; 464 od = sd->s_attr.open;
463 if (od) { 465 if (od) {
@@ -465,7 +467,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
465 wake_up_interruptible(&od->poll); 467 wake_up_interruptible(&od->poll);
466 } 468 }
467 469
468 spin_unlock(&sysfs_open_dirent_lock); 470 spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
469} 471}
470EXPORT_SYMBOL_GPL(sysfs_notify_dirent); 472EXPORT_SYMBOL_GPL(sysfs_notify_dirent);
471 473
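The sysfs_open_dirent_lock conversions follow the usual pattern once a lock becomes takable from interrupt context: every acquisition must disable interrupts, with the irqsave form in paths that cannot assume interrupts are currently enabled (sysfs_notify_dirent() may now be called from an interrupt handler). An illustrative pattern, not the sysfs code itself:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static void update_from_unknown_context(void)
	{
		unsigned long flags;

		/* save and restore IRQ state: safe whether or not IRQs were on */
		spin_lock_irqsave(&example_lock, flags);
		/* ... touch state shared with the interrupt-context caller ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}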
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 381854461b28..c2e30eea74dc 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -186,19 +186,37 @@ xfs_destroy_ioend(
186} 186}
187 187
188/* 188/*
189 * If the end of the current ioend is beyond the current EOF,
190 * return the new EOF value, otherwise zero.
191 */
192STATIC xfs_fsize_t
193xfs_ioend_new_eof(
194 xfs_ioend_t *ioend)
195{
196 xfs_inode_t *ip = XFS_I(ioend->io_inode);
197 xfs_fsize_t isize;
198 xfs_fsize_t bsize;
199
200 bsize = ioend->io_offset + ioend->io_size;
201 isize = MAX(ip->i_size, ip->i_new_size);
202 isize = MIN(isize, bsize);
203 return isize > ip->i_d.di_size ? isize : 0;
204}
205
206/*
189 * Update on-disk file size now that data has been written to disk. 207 * Update on-disk file size now that data has been written to disk.
190 * The current in-memory file size is i_size. If a write is beyond 208 * The current in-memory file size is i_size. If a write is beyond
191 * eof i_new_size will be the intended file size until i_size is 209 * eof i_new_size will be the intended file size until i_size is
192 * updated. If this write does not extend all the way to the valid 210 * updated. If this write does not extend all the way to the valid
193 * file size then restrict this update to the end of the write. 211 * file size then restrict this update to the end of the write.
194 */ 212 */
213
195STATIC void 214STATIC void
196xfs_setfilesize( 215xfs_setfilesize(
197 xfs_ioend_t *ioend) 216 xfs_ioend_t *ioend)
198{ 217{
199 xfs_inode_t *ip = XFS_I(ioend->io_inode); 218 xfs_inode_t *ip = XFS_I(ioend->io_inode);
200 xfs_fsize_t isize; 219 xfs_fsize_t isize;
201 xfs_fsize_t bsize;
202 220
203 ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); 221 ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
204 ASSERT(ioend->io_type != IOMAP_READ); 222 ASSERT(ioend->io_type != IOMAP_READ);
@@ -206,16 +224,10 @@ xfs_setfilesize(
206 if (unlikely(ioend->io_error)) 224 if (unlikely(ioend->io_error))
207 return; 225 return;
208 226
209 bsize = ioend->io_offset + ioend->io_size;
210
211 xfs_ilock(ip, XFS_ILOCK_EXCL); 227 xfs_ilock(ip, XFS_ILOCK_EXCL);
212 228 isize = xfs_ioend_new_eof(ioend);
213 isize = MAX(ip->i_size, ip->i_new_size); 229 if (isize) {
214 isize = MIN(isize, bsize);
215
216 if (ip->i_d.di_size < isize) {
217 ip->i_d.di_size = isize; 230 ip->i_d.di_size = isize;
218 ip->i_update_core = 1;
219 xfs_mark_inode_dirty_sync(ip); 231 xfs_mark_inode_dirty_sync(ip);
220 } 232 }
221 233
@@ -404,10 +416,16 @@ xfs_submit_ioend_bio(
404 struct bio *bio) 416 struct bio *bio)
405{ 417{
406 atomic_inc(&ioend->io_remaining); 418 atomic_inc(&ioend->io_remaining);
407
408 bio->bi_private = ioend; 419 bio->bi_private = ioend;
409 bio->bi_end_io = xfs_end_bio; 420 bio->bi_end_io = xfs_end_bio;
410 421
422 /*
423 * If the I/O is beyond EOF we mark the inode dirty immediately
424 * but don't update the inode size until I/O completion.
425 */
426 if (xfs_ioend_new_eof(ioend))
427 xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
428
411 submit_bio(WRITE, bio); 429 submit_bio(WRITE, bio);
412 ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP)); 430 ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
413 bio_put(bio); 431 bio_put(bio);
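xfs_ioend_new_eof() clamps the end of the completed I/O between the current and intended in-memory sizes, and reports a new on-disk size only when that moves di_size forward. A worked example with illustrative numbers:

	/*
	 * di_size (on disk)   = 4096
	 * ip->i_size          = 8192
	 * ip->i_new_size      = 16384   (write in flight beyond EOF)
	 * ioend covers        [4096, 12288)
	 *
	 *	bsize = 4096 + 8192       = 12288
	 *	isize = MAX(8192, 16384)  = 16384
	 *	isize = MIN(16384, 12288) = 12288
	 *
	 * 12288 > 4096, so di_size advances to 12288: the end of what has
	 * actually reached the disk, not the full intended size.
	 */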
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 629370974e57..eff61e2732af 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -176,14 +176,7 @@ xfs_file_fsync(
176 struct dentry *dentry, 176 struct dentry *dentry,
177 int datasync) 177 int datasync)
178{ 178{
179 struct inode *inode = dentry->d_inode; 179 struct xfs_inode *ip = XFS_I(dentry->d_inode);
180 struct xfs_inode *ip = XFS_I(inode);
181 int error;
182
183 /* capture size updates in I/O completion before writing the inode. */
184 error = filemap_fdatawait(inode->i_mapping);
185 if (error)
186 return error;
187 180
188 xfs_iflags_clear(ip, XFS_ITRUNCATED); 181 xfs_iflags_clear(ip, XFS_ITRUNCATED);
189 return -xfs_fsync(ip); 182 return -xfs_fsync(ip);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index da0159d99f82..cd42ef78f6b5 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -57,19 +57,22 @@
57#include <linux/fiemap.h> 57#include <linux/fiemap.h>
58 58
59/* 59/*
60 * Bring the atime in the XFS inode uptodate. 60 * Bring the timestamps in the XFS inode uptodate.
61 * Used before logging the inode to disk or when the Linux inode goes away. 61 *
62 * Used before writing the inode to disk.
62 */ 63 */
63void 64void
64xfs_synchronize_atime( 65xfs_synchronize_times(
65 xfs_inode_t *ip) 66 xfs_inode_t *ip)
66{ 67{
67 struct inode *inode = VFS_I(ip); 68 struct inode *inode = VFS_I(ip);
68 69
69 if (!(inode->i_state & I_CLEAR)) { 70 ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
70 ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; 71 ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
71 ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec; 72 ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
72 } 73 ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
74 ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
75 ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;
73} 76}
74 77
75/* 78/*
@@ -106,32 +109,20 @@ xfs_ichgtime(
106 if ((flags & XFS_ICHGTIME_MOD) && 109 if ((flags & XFS_ICHGTIME_MOD) &&
107 !timespec_equal(&inode->i_mtime, &tv)) { 110 !timespec_equal(&inode->i_mtime, &tv)) {
108 inode->i_mtime = tv; 111 inode->i_mtime = tv;
109 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
110 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
111 sync_it = 1; 112 sync_it = 1;
112 } 113 }
113 if ((flags & XFS_ICHGTIME_CHG) && 114 if ((flags & XFS_ICHGTIME_CHG) &&
114 !timespec_equal(&inode->i_ctime, &tv)) { 115 !timespec_equal(&inode->i_ctime, &tv)) {
115 inode->i_ctime = tv; 116 inode->i_ctime = tv;
116 ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
117 ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
118 sync_it = 1; 117 sync_it = 1;
119 } 118 }
120 119
121 /* 120 /*
122 * We update the i_update_core field _after_ changing 121 * Update complete - now make sure everyone knows that the inode
123 * the timestamps in order to coordinate properly with 122 * is dirty.
124 * xfs_iflush() so that we don't lose timestamp updates.
125 * This keeps us from having to hold the inode lock
126 * while doing this. We use the SYNCHRONIZE macro to
127 * ensure that the compiler does not reorder the update
128 * of i_update_core above the timestamp updates above.
129 */ 123 */
130 if (sync_it) { 124 if (sync_it)
131 SYNCHRONIZE();
132 ip->i_update_core = 1;
133 xfs_mark_inode_dirty_sync(ip); 125 xfs_mark_inode_dirty_sync(ip);
134 }
135} 126}
136 127
137/* 128/*
@@ -506,10 +497,8 @@ xfs_vn_getattr(
506 stat->gid = ip->i_d.di_gid; 497 stat->gid = ip->i_d.di_gid;
507 stat->ino = ip->i_ino; 498 stat->ino = ip->i_ino;
508 stat->atime = inode->i_atime; 499 stat->atime = inode->i_atime;
509 stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec; 500 stat->mtime = inode->i_mtime;
510 stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; 501 stat->ctime = inode->i_ctime;
511 stat->ctime.tv_sec = ip->i_d.di_ctime.t_sec;
512 stat->ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
513 stat->blocks = 502 stat->blocks =
514 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); 503 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
515 504
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 49e4a6aea73c..072050f8d346 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -667,7 +667,7 @@ start:
667 xip->i_new_size = new_size; 667 xip->i_new_size = new_size;
668 668
669 if (likely(!(ioflags & IO_INVIS))) 669 if (likely(!(ioflags & IO_INVIS)))
670 xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 670 file_update_time(file);
671 671
672 /* 672 /*
673 * If the offset is beyond the size of the file, we have a couple 673 * If the offset is beyond the size of the file, we have a couple
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 9e41f91aa269..3d4a0c84d634 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -80,7 +80,7 @@ xfs_fs_set_xstate(
80 80
81 if (sb->s_flags & MS_RDONLY) 81 if (sb->s_flags & MS_RDONLY)
82 return -EROFS; 82 return -EROFS;
83 if (!XFS_IS_QUOTA_RUNNING(mp)) 83 if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
84 return -ENOSYS; 84 return -ENOSYS;
85 if (!capable(CAP_SYS_ADMIN)) 85 if (!capable(CAP_SYS_ADMIN))
86 return -EPERM; 86 return -EPERM;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index bdd41c8c342f..18a4b8e11df2 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -977,6 +977,28 @@ xfs_fs_inode_init_once(
977} 977}
978 978
979/* 979/*
980 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
981 * we catch unlogged VFS level updates to the inode. Care must be taken
982 * here - the transaction code calls mark_inode_dirty_sync() to mark the
983 * VFS inode dirty in a transaction and clears the i_update_core field;
984 * it must clear the field after calling mark_inode_dirty_sync() to
985 * correctly indicate that the dirty state has been propagated into the
986 * inode log item.
987 *
988 * We need the barrier() to maintain correct ordering between unlogged
989 * updates and the transaction commit code that clears the i_update_core
990 * field. This requires all updates to be completed before marking the
991 * inode dirty.
992 */
993STATIC void
994xfs_fs_dirty_inode(
995 struct inode *inode)
996{
997 barrier();
998 XFS_I(inode)->i_update_core = 1;
999}
1000
1001/*
980 * Attempt to flush the inode, this will actually fail 1002 * Attempt to flush the inode, this will actually fail
981 * if the inode is pinned, but we dirty the inode again 1003 * if the inode is pinned, but we dirty the inode again
982 * at the point when it is unpinned after a log write, 1004 * at the point when it is unpinned after a log write,
@@ -1126,7 +1148,7 @@ xfs_fs_put_super(
1126} 1148}
1127 1149
1128STATIC int 1150STATIC int
1129xfs_fs_sync_super( 1151xfs_fs_sync_fs(
1130 struct super_block *sb, 1152 struct super_block *sb,
1131 int wait) 1153 int wait)
1132{ 1154{
@@ -1134,23 +1156,23 @@ xfs_fs_sync_super(
1134 int error; 1156 int error;
1135 1157
1136 /* 1158 /*
1137 * Treat a sync operation like a freeze. This is to work 1159 * Not much we can do for the first async pass. Writing out the
1138 * around a race in sync_inodes() which works in two phases 1160 * superblock would be counter-productive as we are going to redirty
1139 * - an asynchronous flush, which can write out an inode 1161 * when writing out other data and metadata (and writing out a single
1140 * without waiting for file size updates to complete, and a 1162 * block is quite fast anyway).
1141 * synchronous flush, which wont do anything because the 1163 *
1142 * async flush removed the inode's dirty flag. Also 1164 * Try to asynchronously kick off quota syncing at least.
1143 * sync_inodes() will not see any files that just have
1144 * outstanding transactions to be flushed because we don't
1145 * dirty the Linux inode until after the transaction I/O
1146 * completes.
1147 */ 1165 */
1148 if (wait || unlikely(sb->s_frozen == SB_FREEZE_WRITE)) 1166 if (!wait) {
1149 error = xfs_quiesce_data(mp); 1167 xfs_qm_sync(mp, SYNC_TRYLOCK);
1150 else 1168 return 0;
1151 error = xfs_sync_fsdata(mp, 0); 1169 }
1170
1171 error = xfs_quiesce_data(mp);
1172 if (error)
1173 return -error;
1152 1174
1153 if (unlikely(laptop_mode)) { 1175 if (laptop_mode) {
1154 int prev_sync_seq = mp->m_sync_seq; 1176 int prev_sync_seq = mp->m_sync_seq;
1155 1177
1156 /* 1178 /*
@@ -1169,7 +1191,7 @@ xfs_fs_sync_super(
1169 mp->m_sync_seq != prev_sync_seq); 1191 mp->m_sync_seq != prev_sync_seq);
1170 } 1192 }
1171 1193
1172 return -error; 1194 return 0;
1173} 1195}
1174 1196
1175STATIC int 1197STATIC int
@@ -1539,10 +1561,11 @@ xfs_fs_get_sb(
1539static const struct super_operations xfs_super_operations = { 1561static const struct super_operations xfs_super_operations = {
1540 .alloc_inode = xfs_fs_alloc_inode, 1562 .alloc_inode = xfs_fs_alloc_inode,
1541 .destroy_inode = xfs_fs_destroy_inode, 1563 .destroy_inode = xfs_fs_destroy_inode,
1564 .dirty_inode = xfs_fs_dirty_inode,
1542 .write_inode = xfs_fs_write_inode, 1565 .write_inode = xfs_fs_write_inode,
1543 .clear_inode = xfs_fs_clear_inode, 1566 .clear_inode = xfs_fs_clear_inode,
1544 .put_super = xfs_fs_put_super, 1567 .put_super = xfs_fs_put_super,
1545 .sync_fs = xfs_fs_sync_super, 1568 .sync_fs = xfs_fs_sync_fs,
1546 .freeze_fs = xfs_fs_freeze, 1569 .freeze_fs = xfs_fs_freeze,
1547 .statfs = xfs_fs_statfs, 1570 .statfs = xfs_fs_statfs,
1548 .remount_fs = xfs_fs_remount, 1571 .remount_fs = xfs_fs_remount,
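xfs_fs_dirty_inode() exists so that unlogged VFS-level updates (such as the timestamp changes now left to the VFS elsewhere in this series) reliably set i_update_core, and the barrier() enforces the ordering its comment describes. A sketch of the two sides it coordinates, per that comment and illustrative only:

	/* unlogged-update side:
	 *	inode->i_mtime = ...;              timestamp stores
	 *	barrier();                         compiler may not reorder ...
	 *	XFS_I(inode)->i_update_core = 1;   ... the flag above the stores
	 *
	 * transaction-commit side:
	 *	mark_inode_dirty_sync(inode);      re-dirties via ->dirty_inode
	 *	ip->i_update_core = 0;             dirty state now in the log item
	 */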
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 320be6aea492..961df0a22c78 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -309,11 +309,15 @@ xfs_sync_attr(
309STATIC int 309STATIC int
310xfs_commit_dummy_trans( 310xfs_commit_dummy_trans(
311 struct xfs_mount *mp, 311 struct xfs_mount *mp,
312 uint log_flags) 312 uint flags)
313{ 313{
314 struct xfs_inode *ip = mp->m_rootip; 314 struct xfs_inode *ip = mp->m_rootip;
315 struct xfs_trans *tp; 315 struct xfs_trans *tp;
316 int error; 316 int error;
317 int log_flags = XFS_LOG_FORCE;
318
319 if (flags & SYNC_WAIT)
320 log_flags |= XFS_LOG_SYNC;
317 321
318 /* 322 /*
319 * Put a dummy transaction in the log to tell recovery 323 * Put a dummy transaction in the log to tell recovery
@@ -331,13 +335,12 @@ xfs_commit_dummy_trans(
331 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 335 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
332 xfs_trans_ihold(tp, ip); 336 xfs_trans_ihold(tp, ip);
333 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 337 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
334 /* XXX(hch): ignoring the error here.. */
335 error = xfs_trans_commit(tp, 0); 338 error = xfs_trans_commit(tp, 0);
336
337 xfs_iunlock(ip, XFS_ILOCK_EXCL); 339 xfs_iunlock(ip, XFS_ILOCK_EXCL);
338 340
341 /* the log force ensures this transaction is pushed to disk */
339 xfs_log_force(mp, 0, log_flags); 342 xfs_log_force(mp, 0, log_flags);
340 return 0; 343 return error;
341} 344}
342 345
343int 346int
@@ -385,7 +388,20 @@ xfs_sync_fsdata(
385 else 388 else
386 XFS_BUF_ASYNC(bp); 389 XFS_BUF_ASYNC(bp);
387 390
388 return xfs_bwrite(mp, bp); 391 error = xfs_bwrite(mp, bp);
392 if (error)
393 return error;
394
395 /*
396 * If this is a data integrity sync make sure all pending buffers
397 * are flushed out for the log coverage check below.
398 */
399 if (flags & SYNC_WAIT)
400 xfs_flush_buftarg(mp->m_ddev_targp, 1);
401
402 if (xfs_log_need_covered(mp))
403 error = xfs_commit_dummy_trans(mp, flags);
404 return error;
389 405
390 out_brelse: 406 out_brelse:
391 xfs_buf_relse(bp); 407 xfs_buf_relse(bp);
@@ -419,14 +435,16 @@ xfs_quiesce_data(
419 /* push non-blocking */ 435 /* push non-blocking */
420 xfs_sync_data(mp, 0); 436 xfs_sync_data(mp, 0);
421 xfs_qm_sync(mp, SYNC_TRYLOCK); 437 xfs_qm_sync(mp, SYNC_TRYLOCK);
422 xfs_filestream_flush(mp);
423 438
424 /* push and block */ 439 /* push and block till complete */
425 xfs_sync_data(mp, SYNC_WAIT); 440 xfs_sync_data(mp, SYNC_WAIT);
426 xfs_qm_sync(mp, SYNC_WAIT); 441 xfs_qm_sync(mp, SYNC_WAIT);
427 442
443 /* drop inode references pinned by filestreams */
444 xfs_filestream_flush(mp);
445
428 /* write superblock and hoover up shutdown errors */ 446 /* write superblock and hoover up shutdown errors */
429 error = xfs_sync_fsdata(mp, 0); 447 error = xfs_sync_fsdata(mp, SYNC_WAIT);
430 448
431 /* flush data-only devices */ 449 /* flush data-only devices */
432 if (mp->m_rtdev_targp) 450 if (mp->m_rtdev_targp)
@@ -570,8 +588,6 @@ xfs_sync_worker(
570 /* dgc: errors ignored here */ 588 /* dgc: errors ignored here */
571 error = xfs_qm_sync(mp, SYNC_TRYLOCK); 589 error = xfs_qm_sync(mp, SYNC_TRYLOCK);
572 error = xfs_sync_fsdata(mp, SYNC_TRYLOCK); 590 error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
573 if (xfs_log_need_covered(mp))
574 error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
575 } 591 }
576 mp->m_sync_seq++; 592 mp->m_sync_seq++;
577 wake_up(&mp->m_wait_single_sync_task); 593 wake_up(&mp->m_wait_single_sync_task);
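xfs_commit_dummy_trans() now derives its log force flags from the sync flags, and the dummy transaction moves from the periodic sync worker into xfs_sync_fsdata(), where it runs only after pending buffers are flushed and only if the log still needs covering. The flag mapping is the small but important part (a sketch restating the hunk above):

	int log_flags = XFS_LOG_FORCE;		/* always push the log */

	if (flags & SYNC_WAIT)
		log_flags |= XFS_LOG_SYNC;	/* and wait for stable storage */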
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 4e4276b956e8..5d1a3b98a6e6 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -876,7 +876,6 @@ xfs_dqrele_inode(
876 ip->i_gdquot = NULL; 876 ip->i_gdquot = NULL;
877 } 877 }
878 xfs_iput(ip, XFS_ILOCK_EXCL); 878 xfs_iput(ip, XFS_ILOCK_EXCL);
879 IRELE(ip);
880 879
881 return 0; 880 return 0;
882} 881}
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 7465f9ee125f..ab89a7e94a0f 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -206,10 +206,10 @@ xfs_swap_extents(
206 * process that the file was not changed out from 206 * process that the file was not changed out from
207 * under it. 207 * under it.
208 */ 208 */
209 if ((sbp->bs_ctime.tv_sec != ip->i_d.di_ctime.t_sec) || 209 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
210 (sbp->bs_ctime.tv_nsec != ip->i_d.di_ctime.t_nsec) || 210 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
211 (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) || 211 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
212 (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) { 212 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
213 error = XFS_ERROR(EBUSY); 213 error = XFS_ERROR(EBUSY);
214 goto out_unlock; 214 goto out_unlock;
215 } 215 }
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index fa913e459442..41ad537c49e9 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -854,6 +854,7 @@ xfs_dir2_leaf_getdents(
854 */ 854 */
855 ra_want = howmany(bufsize + mp->m_dirblksize, 855 ra_want = howmany(bufsize + mp->m_dirblksize,
856 mp->m_sb.sb_blocksize) - 1; 856 mp->m_sb.sb_blocksize) - 1;
857 ASSERT(ra_want >= 0);
857 858
858 /* 859 /*
859 * If we don't have as many as we want, and we haven't 860 * If we don't have as many as we want, and we haven't
@@ -1088,7 +1089,8 @@ xfs_dir2_leaf_getdents(
1088 */ 1089 */
1089 ptr += length; 1090 ptr += length;
1090 curoff += length; 1091 curoff += length;
1091 bufsize -= length; 1092 /* bufsize may have just been a guess; don't go negative */
1093 bufsize = bufsize > length ? bufsize - length : 0;
1092 } 1094 }
1093 1095
1094 /* 1096 /*
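In xfs_dir2_leaf_getdents() the caller's bufsize is only an estimate, so the running "bufsize -= length" could wrap below zero once length overshot the remainder. The fix saturates at zero; as a standalone helper:

	#include <stddef.h>

	static size_t sat_sub(size_t bufsize, size_t length)
	{
		/* never wrap past zero when length exceeds the remainder */
		return bufsize > length ? bufsize - length : 0;
	}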
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index ab64f3efb43b..0785797db828 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -880,6 +880,7 @@ nextag:
880 * Not in range - save last search 880 * Not in range - save last search
881 * location and allocate a new inode 881 * location and allocate a new inode
882 */ 882 */
883 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
883 pag->pagl_leftrec = trec.ir_startino; 884 pag->pagl_leftrec = trec.ir_startino;
884 pag->pagl_rightrec = rec.ir_startino; 885 pag->pagl_rightrec = rec.ir_startino;
885 pag->pagl_pagino = pagino; 886 pag->pagl_pagino = pagino;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c1dc7ef5a1d8..b92a4fa2a0a1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3068,9 +3068,9 @@ xfs_iflush_int(
3068 SYNCHRONIZE(); 3068 SYNCHRONIZE();
3069 3069
3070 /* 3070 /*
3071 * Make sure to get the latest atime from the Linux inode. 3071 * Make sure to get the latest timestamps from the Linux inode.
3072 */ 3072 */
3073 xfs_synchronize_atime(ip); 3073 xfs_synchronize_times(ip);
3074 3074
3075 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, 3075 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC,
3076 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 3076 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0b38b9a869ec..41555de1d1db 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -504,7 +504,7 @@ void xfs_ichgtime(xfs_inode_t *, int);
504void xfs_lock_inodes(xfs_inode_t **, int, uint); 504void xfs_lock_inodes(xfs_inode_t **, int, uint);
505void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); 505void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
506 506
507void xfs_synchronize_atime(xfs_inode_t *); 507void xfs_synchronize_times(xfs_inode_t *);
508void xfs_mark_inode_dirty_sync(xfs_inode_t *); 508void xfs_mark_inode_dirty_sync(xfs_inode_t *);
509 509
510#if defined(XFS_INODE_TRACE) 510#if defined(XFS_INODE_TRACE)
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 47d5b663c37e..9794b876d6ff 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -232,6 +232,15 @@ xfs_inode_item_format(
232 nvecs = 1; 232 nvecs = 1;
233 233
234 /* 234 /*
235 * Make sure the linux inode is dirty. We do this before
236 * clearing i_update_core as the VFS will call back into
237 * XFS here and set i_update_core, so we need to dirty the
238 * inode first so that the ordering of i_update_core and
239 * unlogged modifications still works as described below.
240 */
241 xfs_mark_inode_dirty_sync(ip);
242
243 /*
235 * Clear i_update_core if the timestamps (or any other 244 * Clear i_update_core if the timestamps (or any other
236 * non-transactional modification) need flushing/logging 245 * non-transactional modification) need flushing/logging
237 * and we're about to log them with the rest of the core. 246 * and we're about to log them with the rest of the core.
@@ -263,14 +272,9 @@ xfs_inode_item_format(
263 } 272 }
264 273
265 /* 274 /*
266 * Make sure to get the latest atime from the Linux inode. 275 * Make sure to get the latest timestamps from the Linux inode.
267 */ 276 */
268 xfs_synchronize_atime(ip); 277 xfs_synchronize_times(ip);
269
270 /*
271 * make sure the linux inode is dirty
272 */
273 xfs_mark_inode_dirty_sync(ip);
274 278
275 vecp->i_addr = (xfs_caddr_t)&ip->i_d; 279 vecp->i_addr = (xfs_caddr_t)&ip->i_d;
276 vecp->i_len = sizeof(struct xfs_icdinode); 280 vecp->i_len = sizeof(struct xfs_icdinode);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index b68f9107e26c..62efab2f3839 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -59,6 +59,7 @@ xfs_bulkstat_one_iget(
59{ 59{
60 xfs_icdinode_t *dic; /* dinode core info pointer */ 60 xfs_icdinode_t *dic; /* dinode core info pointer */
61 xfs_inode_t *ip; /* incore inode pointer */ 61 xfs_inode_t *ip; /* incore inode pointer */
62 struct inode *inode;
62 int error; 63 int error;
63 64
64 error = xfs_iget(mp, NULL, ino, 65 error = xfs_iget(mp, NULL, ino,
@@ -72,6 +73,7 @@ xfs_bulkstat_one_iget(
72 ASSERT(ip->i_imap.im_blkno != 0); 73 ASSERT(ip->i_imap.im_blkno != 0);
73 74
74 dic = &ip->i_d; 75 dic = &ip->i_d;
76 inode = VFS_I(ip);
75 77
76 /* xfs_iget returns the following without needing 78 /* xfs_iget returns the following without needing
77 * further change. 79 * further change.
@@ -83,16 +85,19 @@ xfs_bulkstat_one_iget(
83 buf->bs_uid = dic->di_uid; 85 buf->bs_uid = dic->di_uid;
84 buf->bs_gid = dic->di_gid; 86 buf->bs_gid = dic->di_gid;
85 buf->bs_size = dic->di_size; 87 buf->bs_size = dic->di_size;
88
86 /* 89 /*
87 * We are reading the atime from the Linux inode because the 90 * We need to read the timestamps from the Linux inode because
88 * dinode might not be uptodate. 91 * the VFS keeps writing directly into the inode structure instead
92 * of telling us about the updates.
89 */ 93 */
90 buf->bs_atime.tv_sec = VFS_I(ip)->i_atime.tv_sec; 94 buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
91 buf->bs_atime.tv_nsec = VFS_I(ip)->i_atime.tv_nsec; 95 buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
92 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; 96 buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
93 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; 97 buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
94 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; 98 buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
95 buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec; 99 buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
100
96 buf->bs_xflags = xfs_ip2xflags(ip); 101 buf->bs_xflags = xfs_ip2xflags(ip);
97 buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog; 102 buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
98 buf->bs_extents = dic->di_nextents; 103 buf->bs_extents = dic->di_nextents;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index a434f287962d..b572f7e840e0 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2476,12 +2476,6 @@ xfs_reclaim(
2476 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); 2476 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
2477 2477
2478 /* 2478 /*
2479 * Make sure the atime in the XFS inode is correct before freeing the
2480 * Linux inode.
2481 */
2482 xfs_synchronize_atime(ip);
2483
2484 /*
2485 * If we have nothing to flush with this inode then complete the 2479 * If we have nothing to flush with this inode then complete the
2486 * teardown now, otherwise break the link between the xfs inode and the 2480 * teardown now, otherwise break the link between the xfs inode and the
2487 * linux inode and clean up the xfs inode later. This avoids flushing 2481 * linux inode and clean up the xfs inode later. This avoids flushing