Diffstat (limited to 'fs')
-rw-r--r--  fs/cramfs/Kconfig            |   5
-rw-r--r--  fs/debugfs/inode.c           |   3
-rw-r--r--  fs/devpts/inode.c            |   1
-rw-r--r--  fs/eventpoll.c               | 145
-rw-r--r--  fs/exec.c                    |   6
-rw-r--r--  fs/fs-writeback.c            |  33
-rw-r--r--  fs/hfs/btree.h               |   5
-rw-r--r--  fs/hfsplus/btree.c           | 112
-rw-r--r--  fs/hfsplus/hfsplus_fs.h      |  10
-rw-r--r--  fs/hfsplus/hfsplus_raw.h     |  11
-rw-r--r--  fs/hfsplus/super.c           |   2
-rw-r--r--  fs/hfsplus/xattr.c           | 207
-rw-r--r--  fs/ocfs2/alloc.c             |   2
-rw-r--r--  fs/ocfs2/aops.c              |  32
-rw-r--r--  fs/ocfs2/buffer_head_io.c    |   4
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c |  40
-rw-r--r--  fs/ocfs2/cluster/masklog.h   |   3
-rw-r--r--  fs/ocfs2/dir.c               |  12
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c     |   8
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c   |   7
-rw-r--r--  fs/ocfs2/file.c              |   5
-rw-r--r--  fs/ocfs2/journal.h           |   3
-rw-r--r--  fs/ocfs2/move_extents.c      |  11
-rw-r--r--  fs/ocfs2/namei.c             |   2
-rw-r--r--  fs/ocfs2/refcounttree.c      |  20
-rw-r--r--  fs/ocfs2/resize.c            |  12
-rw-r--r--  fs/ocfs2/stackglue.c         |   8
-rw-r--r--  fs/ocfs2/suballoc.c          |   4
-rw-r--r--  fs/ocfs2/super.c             |   4
-rw-r--r--  fs/ocfs2/xattr.c             |  28
-rw-r--r--  fs/proc/Kconfig              |   4
-rw-r--r--  fs/proc/inode.c              |  16
-rw-r--r--  fs/proc/kcore.c              |   3
-rw-r--r--  fs/proc/meminfo.c            |   5
-rw-r--r--  fs/proc/task_mmu.c           |  17
-rw-r--r--  fs/sync.c                    |  15
-rw-r--r--  fs/xfs/xfs_super.c           |   2
37 files changed, 601 insertions(+), 206 deletions(-)
diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig
index cd06466f365e..11b29d491b7c 100644
--- a/fs/cramfs/Kconfig
+++ b/fs/cramfs/Kconfig
@@ -1,5 +1,5 @@
 config CRAMFS
-	tristate "Compressed ROM file system support (cramfs)"
+	tristate "Compressed ROM file system support (cramfs) (OBSOLETE)"
 	depends on BLOCK
 	select ZLIB_INFLATE
 	help
@@ -16,4 +16,7 @@ config CRAMFS
 	  cramfs. Note that the root file system (the one containing the
 	  directory /) cannot be compiled as a module.
 
+	  This filesystem is obsoleted by SquashFS, which is much better
+	  in terms of performance and features.
+
 	  If unsure, say N.
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index c7c83ff0f752..9c0444cccbe1 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -566,8 +566,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
 		mutex_lock(&parent->d_inode->i_mutex);
 
 		if (child != dentry) {
-			next = list_entry(child->d_u.d_child.next, struct dentry,
-					  d_u.d_child);
+			next = list_next_entry(child, d_u.d_child);
 			goto up;
 		}
 
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 073d30b9d1ac..a726b9f29cb7 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -498,6 +498,7 @@ static void devpts_kill_sb(struct super_block *sb)
 {
 	struct pts_fs_info *fsi = DEVPTS_SB(sb);
 
+	ida_destroy(&fsi->allocated_ptys);
 	kfree(fsi);
 	kill_litter_super(sb);
 }
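
The devpts hunk plugs a leak of the IDA's internal node cache: ida_destroy() must run before the structure embedding the IDA is freed. A minimal sketch of that lifecycle using the kernel IDA API (the struct and function names here are illustrative, not from the patch):

    #include <linux/idr.h>
    #include <linux/slab.h>

    struct demo_info {                      /* hypothetical stand-in for pts_fs_info */
    	struct ida allocated_ids;
    };

    static struct demo_info *demo_create(void)
    {
    	struct demo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

    	if (info)
    		ida_init(&info->allocated_ids);
    	return info;
    }

    static void demo_destroy(struct demo_info *info)
    {
    	/*
    	 * Release the IDA's cached internal layers before kfree() --
    	 * the same ordering the hunk above adds to devpts_kill_sb().
    	 */
    	ida_destroy(&info->allocated_ids);
    	kfree(info);
    }
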
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 983e3960abff..79b65c3b9e87 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -41,6 +41,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/compat.h>
+#include <linux/rculist.h>
 
 /*
  * LOCKING:
@@ -133,8 +134,12 @@ struct nested_calls {
  * of these on a server and we do not want this to take another cache line.
  */
 struct epitem {
-	/* RB tree node used to link this structure to the eventpoll RB tree */
-	struct rb_node rbn;
+	union {
+		/* RB tree node links this structure to the eventpoll RB tree */
+		struct rb_node rbn;
+		/* Used to free the struct epitem */
+		struct rcu_head rcu;
+	};
 
 	/* List header used to link this structure to the eventpoll ready list */
 	struct list_head rdllink;
@@ -580,14 +585,14 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
  * @sproc: Pointer to the scan callback.
  * @priv: Private opaque data passed to the @sproc callback.
  * @depth: The current depth of recursive f_op->poll calls.
+ * @ep_locked: caller already holds ep->mtx
  *
  * Returns: The same integer error code returned by the @sproc callback.
  */
 static int ep_scan_ready_list(struct eventpoll *ep,
 			      int (*sproc)(struct eventpoll *,
 					   struct list_head *, void *),
-			      void *priv,
-			      int depth)
+			      void *priv, int depth, bool ep_locked)
 {
 	int error, pwake = 0;
 	unsigned long flags;
@@ -598,7 +603,9 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 	 * We need to lock this because we could be hit by
 	 * eventpoll_release_file() and epoll_ctl().
 	 */
-	mutex_lock_nested(&ep->mtx, depth);
+
+	if (!ep_locked)
+		mutex_lock_nested(&ep->mtx, depth);
 
 	/*
 	 * Steal the ready list, and re-init the original one to the
@@ -662,7 +669,8 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 	}
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	mutex_unlock(&ep->mtx);
+	if (!ep_locked)
+		mutex_unlock(&ep->mtx);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -671,6 +679,12 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 	return error;
 }
 
+static void epi_rcu_free(struct rcu_head *head)
+{
+	struct epitem *epi = container_of(head, struct epitem, rcu);
+	kmem_cache_free(epi_cache, epi);
+}
+
 /*
  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
  * all the associated resources. Must be called with "mtx" held.
@@ -692,8 +706,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 
 	/* Remove the current item from the list of epoll hooks */
 	spin_lock(&file->f_lock);
-	if (ep_is_linked(&epi->fllink))
-		list_del_init(&epi->fllink);
+	list_del_rcu(&epi->fllink);
 	spin_unlock(&file->f_lock);
 
 	rb_erase(&epi->rbn, &ep->rbr);
@@ -704,9 +717,14 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	spin_unlock_irqrestore(&ep->lock, flags);
 
 	wakeup_source_unregister(ep_wakeup_source(epi));
-
-	/* At this point it is safe to free the eventpoll item */
-	kmem_cache_free(epi_cache, epi);
+	/*
+	 * At this point it is safe to free the eventpoll item. Use the union
+	 * field epi->rcu, since we are trying to minimize the size of
+	 * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
+	 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
+	 * use of the rbn field.
+	 */
+	call_rcu(&epi->rcu, epi_rcu_free);
 
 	atomic_long_dec(&ep->user->epoll_watches);
 
@@ -807,15 +825,34 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 	return 0;
 }
 
+static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
+				 poll_table *pt);
+
+struct readyevents_arg {
+	struct eventpoll *ep;
+	bool locked;
+};
+
 static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
 {
-	return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
+	struct readyevents_arg *arg = priv;
+
+	return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
+				  call_nests + 1, arg->locked);
 }
 
 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
 	int pollflags;
 	struct eventpoll *ep = file->private_data;
+	struct readyevents_arg arg;
+
+	/*
+	 * During ep_insert() we already hold the ep->mtx for the tfile.
+	 * Prevent re-acquisition.
+	 */
+	arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
+	arg.ep = ep;
 
 	/* Insert inside our poll wait queue */
 	poll_wait(file, &ep->poll_wait, wait);
@@ -827,7 +864,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 	 * could re-enter here.
 	 */
 	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
-				   ep_poll_readyevents_proc, ep, ep, current);
+				   ep_poll_readyevents_proc, &arg, ep, current);
 
 	return pollflags != -1 ? pollflags : 0;
 }
@@ -872,7 +909,6 @@ static const struct file_operations eventpoll_fops = {
  */
 void eventpoll_release_file(struct file *file)
 {
-	struct list_head *lsthead = &file->f_ep_links;
 	struct eventpoll *ep;
 	struct epitem *epi;
 
@@ -890,17 +926,12 @@ void eventpoll_release_file(struct file *file)
 	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
 	 */
 	mutex_lock(&epmutex);
-
-	while (!list_empty(lsthead)) {
-		epi = list_first_entry(lsthead, struct epitem, fllink);
-
+	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
 		ep = epi->ep;
-		list_del_init(&epi->fllink);
 		mutex_lock_nested(&ep->mtx, 0);
 		ep_remove(ep, epi);
 		mutex_unlock(&ep->mtx);
 	}
-
 	mutex_unlock(&epmutex);
 }
 
@@ -1138,7 +1169,9 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
 	struct file *child_file;
 	struct epitem *epi;
 
-	list_for_each_entry(epi, &file->f_ep_links, fllink) {
+	/* CTL_DEL can remove links here, but that can't increase our count */
+	rcu_read_lock();
+	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
 		child_file = epi->ep->file;
 		if (is_file_epoll(child_file)) {
 			if (list_empty(&child_file->f_ep_links)) {
@@ -1160,6 +1193,7 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
1160 "file is not an ep!\n"); 1193 "file is not an ep!\n");
1161 } 1194 }
1162 } 1195 }
1196 rcu_read_unlock();
1163 return error; 1197 return error;
1164} 1198}
1165 1199
@@ -1231,7 +1265,7 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
  * Must be called with "mtx" held.
  */
 static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
-		     struct file *tfile, int fd)
+		     struct file *tfile, int fd, int full_check)
 {
 	int error, revents, pwake = 0;
 	unsigned long flags;
@@ -1286,7 +1320,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	/* Add the current item to the list of active epoll hook for this file */
 	spin_lock(&tfile->f_lock);
-	list_add_tail(&epi->fllink, &tfile->f_ep_links);
+	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
 	spin_unlock(&tfile->f_lock);
 
 	/*
@@ -1297,7 +1331,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	/* now check if we've created too many backpaths */
 	error = -EINVAL;
-	if (reverse_path_check())
+	if (full_check && reverse_path_check())
 		goto error_remove_epi;
 
 	/* We have to drop the new item inside our item list to keep track of it */
@@ -1327,8 +1361,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 error_remove_epi:
 	spin_lock(&tfile->f_lock);
-	if (ep_is_linked(&epi->fllink))
-		list_del_init(&epi->fllink);
+	list_del_rcu(&epi->fllink);
 	spin_unlock(&tfile->f_lock);
 
 	rb_erase(&epi->rbn, &ep->rbr);
@@ -1521,7 +1554,7 @@ static int ep_send_events(struct eventpoll *ep,
 	esed.maxevents = maxevents;
 	esed.events = events;
 
-	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
+	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
 }
 
 static inline struct timespec ep_set_mstimeout(long ms)
@@ -1791,11 +1824,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 		struct epoll_event __user *, event)
 {
 	int error;
-	int did_lock_epmutex = 0;
+	int full_check = 0;
 	struct fd f, tf;
 	struct eventpoll *ep;
 	struct epitem *epi;
 	struct epoll_event epds;
+	struct eventpoll *tep = NULL;
 
 	error = -EFAULT;
 	if (ep_op_has_event(op) &&
@@ -1844,26 +1878,40 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	 * and hang them on the tfile_check_list, so we can check that we
 	 * haven't created too many possible wakeup paths.
 	 *
-	 * We need to hold the epmutex across both ep_insert and ep_remove
-	 * b/c we want to make sure we are looking at a coherent view of
-	 * epoll network.
+	 * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
+	 * the epoll file descriptor is attaching directly to a wakeup source,
+	 * unless the epoll file descriptor is nested. The purpose of taking the
+	 * 'epmutex' on add is to prevent complex topologies such as loops and
+	 * deep wakeup paths from forming in parallel through multiple
+	 * EPOLL_CTL_ADD operations.
 	 */
-	if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
-		mutex_lock(&epmutex);
-		did_lock_epmutex = 1;
-	}
+	mutex_lock_nested(&ep->mtx, 0);
 	if (op == EPOLL_CTL_ADD) {
-		if (is_file_epoll(tf.file)) {
-			error = -ELOOP;
-			if (ep_loop_check(ep, tf.file) != 0) {
-				clear_tfile_check_list();
-				goto error_tgt_fput;
+		if (!list_empty(&f.file->f_ep_links) ||
+						is_file_epoll(tf.file)) {
+			full_check = 1;
+			mutex_unlock(&ep->mtx);
+			mutex_lock(&epmutex);
+			if (is_file_epoll(tf.file)) {
+				error = -ELOOP;
+				if (ep_loop_check(ep, tf.file) != 0) {
+					clear_tfile_check_list();
+					goto error_tgt_fput;
+				}
+			} else
+				list_add(&tf.file->f_tfile_llink,
+							&tfile_check_list);
+			mutex_lock_nested(&ep->mtx, 0);
+			if (is_file_epoll(tf.file)) {
+				tep = tf.file->private_data;
+				mutex_lock_nested(&tep->mtx, 1);
 			}
-		} else
-			list_add(&tf.file->f_tfile_llink, &tfile_check_list);
+		}
+	}
+	if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) {
+		tep = tf.file->private_data;
+		mutex_lock_nested(&tep->mtx, 1);
 	}
-
-	mutex_lock_nested(&ep->mtx, 0);
 
 	/*
 	 * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
@@ -1877,10 +1925,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	case EPOLL_CTL_ADD:
 		if (!epi) {
 			epds.events |= POLLERR | POLLHUP;
-			error = ep_insert(ep, &epds, tf.file, fd);
+			error = ep_insert(ep, &epds, tf.file, fd, full_check);
 		} else
 			error = -EEXIST;
-		clear_tfile_check_list();
+		if (full_check)
+			clear_tfile_check_list();
 		break;
 	case EPOLL_CTL_DEL:
 		if (epi)
@@ -1896,10 +1945,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 		error = -ENOENT;
 		break;
 	}
+	if (tep != NULL)
+		mutex_unlock(&tep->mtx);
 	mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
-	if (did_lock_epmutex)
+	if (full_check)
 		mutex_unlock(&epmutex);
 
 	fdput(tf);
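
The thrust of the eventpoll changes is converting f_ep_links into an RCU-protected list: readers walk it locklessly while writers unlink under a spinlock and defer the actual free with call_rcu(). A self-contained sketch of that pattern under the same assumptions (spinlock-serialized writers, rcu_read_lock readers); the struct and function names are illustrative, not epoll's:

    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {
    	struct list_head link;	/* plays the role of epi->fllink */
    	struct rcu_head rcu;	/* plays the role of epi->rcu */
    };

    static LIST_HEAD(items);
    static DEFINE_SPINLOCK(items_lock);

    static void item_rcu_free(struct rcu_head *head)
    {
    	kfree(container_of(head, struct item, rcu));
    }

    static void item_del(struct item *it)
    {
    	spin_lock(&items_lock);
    	list_del_rcu(&it->link);	/* unlink; readers may still see it */
    	spin_unlock(&items_lock);
    	call_rcu(&it->rcu, item_rcu_free);	/* free after a grace period */
    }

    static void items_walk(void)
    {
    	struct item *it;

    	rcu_read_lock();
    	list_for_each_entry_rcu(it, &items, link) {
    		/* safe against a concurrent item_del() */
    	}
    	rcu_read_unlock();
    }
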
diff --git a/fs/exec.c b/fs/exec.c
index be4c81c7251c..977319fd77f3 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1664,6 +1664,12 @@ int __get_dumpable(unsigned long mm_flags)
 	return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
 }
 
+/*
+ * This returns the actual value of the suid_dumpable flag. For things
+ * that are using this for checking for privilege transitions, it must
+ * test against SUID_DUMP_USER rather than treating it as a boolean
+ * value.
+ */
 int get_dumpable(struct mm_struct *mm)
 {
 	return __get_dumpable(mm->flags);
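
The new comment exists because SUID_DUMP_ROOT (2) is also non-zero, so treating the return value as a boolean would wrongly pass a privilege check. A hedged fragment of the test the comment asks callers to write (allow_access() is a hypothetical caller, not kernel code):

    /* WRONG: SUID_DUMP_ROOT (2) is truthy, yet is not a "safe" state. */
    if (get_dumpable(mm))
    	allow_access();

    /* RIGHT: only SUID_DUMP_USER marks a task with no privilege transition. */
    if (get_dumpable(mm) == SUID_DUMP_USER)
    	allow_access();
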
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 09c11329a17c..1f4a10ece2f1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -40,13 +40,18 @@
 struct wb_writeback_work {
 	long nr_pages;
 	struct super_block *sb;
-	unsigned long *older_than_this;
+	/*
+	 * Write only inodes dirtied before this time. Don't forget to set
+	 * older_than_this_is_set when you set this.
+	 */
+	unsigned long older_than_this;
 	enum writeback_sync_modes sync_mode;
 	unsigned int tagged_writepages:1;
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
 	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
+	unsigned int older_than_this_is_set:1;
 	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
@@ -247,10 +252,10 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 	int do_sb_sort = 0;
 	int moved = 0;
 
+	WARN_ON_ONCE(!work->older_than_this_is_set);
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (work->older_than_this &&
-		    inode_dirtied_after(inode, *work->older_than_this))
+		if (inode_dirtied_after(inode, work->older_than_this))
 			break;
 		list_move(&inode->i_wb_list, &tmp);
 		moved++;
@@ -734,6 +739,8 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
 		.sync_mode	= WB_SYNC_NONE,
 		.range_cyclic	= 1,
 		.reason		= reason,
+		.older_than_this = jiffies,
+		.older_than_this_is_set = 1,
 	};
 
 	spin_lock(&wb->list_lock);
@@ -792,12 +799,13 @@ static long wb_writeback(struct bdi_writeback *wb,
 {
 	unsigned long wb_start = jiffies;
 	long nr_pages = work->nr_pages;
-	unsigned long oldest_jif;
 	struct inode *inode;
 	long progress;
 
-	oldest_jif = jiffies;
-	work->older_than_this = &oldest_jif;
+	if (!work->older_than_this_is_set) {
+		work->older_than_this = jiffies;
+		work->older_than_this_is_set = 1;
+	}
 
 	spin_lock(&wb->list_lock);
 	for (;;) {
@@ -831,10 +839,10 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * safe.
 		 */
 		if (work->for_kupdate) {
-			oldest_jif = jiffies -
+			work->older_than_this = jiffies -
 				msecs_to_jiffies(dirty_expire_interval * 10);
 		} else if (work->for_background)
-			oldest_jif = jiffies;
+			work->older_than_this = jiffies;
 
 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
@@ -1346,18 +1354,21 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
 
 /**
  * sync_inodes_sb	-	sync sb inode pages
  * @sb: the superblock
+ * @older_than_this: timestamp
  *
  * This function writes and waits on any dirty inode belonging to this
- * super_block.
+ * superblock that has been dirtied before given timestamp.
  */
-void sync_inodes_sb(struct super_block *sb)
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
 		.sb		= sb,
 		.sync_mode	= WB_SYNC_ALL,
 		.nr_pages	= LONG_MAX,
+		.older_than_this = older_than_this,
+		.older_than_this_is_set = 1,
 		.range_cyclic	= 0,
 		.done		= &done,
 		.reason		= WB_REASON_SYNC,
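
With the new signature every caller must supply the cutoff explicitly; per the diffstat, fs/sync.c is updated in this same series (that hunk is not shown here). A caller wanting the old "everything dirty so far" behavior would presumably snapshot the clock first, along these lines (illustrative, not a hunk from this series):

    unsigned long start = jiffies;	/* snapshot the cutoff before starting */

    sync_inodes_sb(sb, start);		/* write inodes dirtied before 'start' */
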
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 2a1d712f85dc..f6bd266d70b5 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -153,11 +153,6 @@ struct hfs_btree_header_rec {
 	u32 reserved3[16];
 } __packed;
 
-#define HFS_NODE_INDEX	0x00	/* An internal (index) node */
-#define HFS_NODE_HEADER	0x01	/* The tree header node (node 0) */
-#define HFS_NODE_MAP	0x02	/* Holds part of the bitmap of used nodes */
-#define HFS_NODE_LEAF	0xFF	/* A leaf (ndNHeight==1) node */
-
 #define BTREE_ATTR_BADCLOSE	0x00000001	/* b-tree not closed properly. not
 						   used by hfsplus. */
 #define HFS_TREE_BIGKEYS	0x00000002	/* key length is u16 instead of u8.
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 0c6540c91167..0fcec8b2a90b 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -15,6 +15,118 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
+/*
+ * Initial source code of clump size calculation is gotten
+ * from http://opensource.apple.com/tarballs/diskdev_cmds/
+ */
+#define CLUMP_ENTRIES	15
+
+static short clumptbl[CLUMP_ENTRIES * 3] = {
+/*
+ *	    Volume	Attributes	 Catalog	 Extents
+ *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
+ */
+	/*   1GB */	  4,		  4,		 4,
+	/*   2GB */	  6,		  6,		 4,
+	/*   4GB */	  8,		  8,		 4,
+	/*   8GB */	 11,		 11,		 5,
+	/*
+	 * For volumes 16GB and larger, we want to make sure that a full OS
+	 * install won't require fragmentation of the Catalog or Attributes
+	 * B-trees.  We do this by making the clump sizes sufficiently large,
+	 * and by leaving a gap after the B-trees for them to grow into.
+	 *
+	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
+	 * results in:
+	 * Catalog B-tree Header
+	 *	nodeSize:          8192
+	 *	totalNodes:       31616
+	 *	freeNodes:         1978
+	 * (used = 231.55 MB)
+	 * Attributes B-tree Header
+	 *	nodeSize:          8192
+	 *	totalNodes:       63232
+	 *	freeNodes:          958
+	 * (used = 486.52 MB)
+	 *
+	 * We also want Time Machine backup volumes to have a sufficiently
+	 * large clump size to reduce fragmentation.
+	 *
+	 * The series of numbers for Catalog and Attribute form a geometric
+	 * series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
+	 * the previous term.  For Attributes (16GB to 512GB), each term is
+	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
+	 * 2**(1/5) times the previous term.
+	 */
+	/*  16GB */	 64,		 32,		 5,
+	/*  32GB */	 84,		 49,		 6,
+	/*  64GB */	111,		 74,		 7,
+	/* 128GB */	147,		111,		 8,
+	/* 256GB */	194,		169,		 9,
+	/* 512GB */	256,		256,		11,
+	/*   1TB */	294,		294,		14,
+	/*   2TB */	338,		338,		16,
+	/*   4TB */	388,		388,		20,
+	/*   8TB */	446,		446,		25,
+	/*  16TB */	512,		512,		32
+};
+
+u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
+				  u64 sectors, int file_id)
+{
+	u32 mod = max(node_size, block_size);
+	u32 clump_size;
+	int column;
+	int i;
+
+	/* Figure out which column of the above table to use for this file. */
+	switch (file_id) {
+	case HFSPLUS_ATTR_CNID:
+		column = 0;
+		break;
+	case HFSPLUS_CAT_CNID:
+		column = 1;
+		break;
+	default:
+		column = 2;
+		break;
+	}
+
+	/*
+	 * The default clump size is 0.8% of the volume size. And
+	 * it must also be a multiple of the node and block size.
+	 */
+	if (sectors < 0x200000) {
+		clump_size = sectors << 2;	/*  0.8 %  */
+		if (clump_size < (8 * node_size))
+			clump_size = 8 * node_size;
+	} else {
+		/* turn exponent into table index... */
+		for (i = 0, sectors = sectors >> 22;
+		     sectors && (i < CLUMP_ENTRIES - 1);
+		     ++i, sectors = sectors >> 1) {
+			/* empty body */
+		}
+
+		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
+	}
+
+	/*
+	 * Round the clump size to a multiple of node and block size.
+	 * NOTE: This rounds down.
+	 */
+	clump_size /= mod;
+	clump_size *= mod;
+
+	/*
+	 * Rounding down could have rounded down to 0 if the block size was
+	 * greater than the clump size.  If so, just use one block or node.
+	 */
+	if (clump_size == 0)
+		clump_size = mod;
+
+	return clump_size;
+}
 
 /* Get a reference to a B*Tree and do some initial checks */
 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
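
To make the table concrete, a worked example (my arithmetic, not part of the patch): a 64 GB volume has 0x8000000 512-byte sectors, so sectors >> 22 == 32 and the loop halves six times before reaching zero, leaving i == 6, the 64GB row. For the attributes tree (column 0) that row reads 111, i.e. a 111 MB clump, which is already a multiple of an 8 KB node and 4 KB block, so the rounding leaves it unchanged:

    u32 clump;

    /* 64 GB volume: 134217728 sectors of 512 bytes */
    clump = hfsplus_calc_btree_clump_size(4096,		/* block_size */
    				      8192,		/* node_size */
    				      134217728ULL,	/* sectors */
    				      HFSPLUS_ATTR_CNID);
    /* clump == 111 * 1024 * 1024 == 116391936, per the 64GB table row */
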
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 2b9cd01696e2..08846425b67f 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -127,6 +127,14 @@ struct hfs_bnode {
 #define HFS_BNODE_DELETED	4
 
 /*
+ * Attributes file states
+ */
+#define HFSPLUS_EMPTY_ATTR_TREE		0
+#define HFSPLUS_CREATING_ATTR_TREE	1
+#define HFSPLUS_VALID_ATTR_TREE		2
+#define HFSPLUS_FAILED_ATTR_TREE	3
+
+/*
  * HFS+ superblock info (built from Volume Header on disk)
  */
 
@@ -141,6 +149,7 @@ struct hfsplus_sb_info {
 	struct hfs_btree *ext_tree;
 	struct hfs_btree *cat_tree;
 	struct hfs_btree *attr_tree;
+	atomic_t attr_tree_state;
 	struct inode *alloc_file;
 	struct inode *hidden_dir;
 	struct nls_table *nls;
@@ -380,6 +389,7 @@ int hfsplus_block_allocate(struct super_block *, u32, u32, u32 *);
 int hfsplus_block_free(struct super_block *, u32, u32);
 
 /* btree.c */
+u32 hfsplus_calc_btree_clump_size(u32, u32, u64, int);
 struct hfs_btree *hfs_btree_open(struct super_block *, u32);
 void hfs_btree_close(struct hfs_btree *);
 int hfs_btree_write(struct hfs_btree *);
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 452ede01b036..8ffb3a8ffe75 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -156,10 +156,10 @@ struct hfs_bnode_desc {
 } __packed;
 
 /* HFS+ BTree node types */
-#define HFS_NODE_INDEX	0x00
-#define HFS_NODE_HEADER	0x01
-#define HFS_NODE_MAP	0x02
-#define HFS_NODE_LEAF	0xFF
+#define HFS_NODE_INDEX	0x00	/* An internal (index) node */
+#define HFS_NODE_HEADER	0x01	/* The tree header node (node 0) */
+#define HFS_NODE_MAP	0x02	/* Holds part of the bitmap of used nodes */
+#define HFS_NODE_LEAF	0xFF	/* A leaf (ndNHeight==1) node */
 
 /* HFS+ BTree header */
 struct hfs_btree_header_rec {
@@ -187,6 +187,9 @@ struct hfs_btree_header_rec {
 /* HFS+ BTree misc info */
 #define HFSPLUS_TREE_HEAD	0
 #define HFSPLUS_NODE_MXSZ	32768
+#define HFSPLUS_ATTR_TREE_NODE_SIZE		8192
+#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT	3
+#define HFSPLUS_BTREE_HDR_USER_BYTES		128
 
 /* Some special File ID numbers (stolen from hfs.h) */
 #define HFSPLUS_POR_CNID	1	/* Parent Of the Root */
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 4c4d142cf890..80875aa640ef 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -474,12 +474,14 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		pr_err("failed to load catalog file\n");
 		goto out_close_ext_tree;
 	}
+	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
 	if (vhdr->attr_file.total_blocks != 0) {
 		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
 		if (!sbi->attr_tree) {
 			pr_err("failed to load attributes file\n");
 			goto out_close_cat_tree;
 		}
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
 	}
 	sb->s_xattr = hfsplus_xattr_handlers;
 
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index bd8471fb9a6a..efc85b1377cc 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -127,6 +127,208 @@ static int can_set_xattr(struct inode *inode, const char *name,
 	return 0;
 }
 
+static void hfsplus_init_header_node(struct inode *attr_file,
+				     u32 clump_size,
+				     char *buf, size_t node_size)
+{
+	struct hfs_bnode_desc *desc;
+	struct hfs_btree_header_rec *head;
+	u16 offset;
+	__be16 *rec_offsets;
+	u32 hdr_node_map_rec_bits;
+	char *bmp;
+	u32 used_nodes;
+	u32 used_bmp_bytes;
+
+	hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %zu\n",
+		clump_size, node_size);
+
+	/* The end of the node contains list of record offsets */
+	rec_offsets = (__be16 *)(buf + node_size);
+
+	desc = (struct hfs_bnode_desc *)buf;
+	desc->type = HFS_NODE_HEADER;
+	desc->num_recs = cpu_to_be16(HFSPLUS_BTREE_HDR_NODE_RECS_COUNT);
+	offset = sizeof(struct hfs_bnode_desc);
+	*--rec_offsets = cpu_to_be16(offset);
+
+	head = (struct hfs_btree_header_rec *)(buf + offset);
+	head->node_size = cpu_to_be16(node_size);
+	head->node_count = cpu_to_be32(i_size_read(attr_file) / node_size);
+	head->free_nodes = cpu_to_be32(be32_to_cpu(head->node_count) - 1);
+	head->clump_size = cpu_to_be32(clump_size);
+	head->attributes |= cpu_to_be32(HFS_TREE_BIGKEYS | HFS_TREE_VARIDXKEYS);
+	head->max_key_len = cpu_to_be16(HFSPLUS_ATTR_KEYLEN - sizeof(u16));
+	offset += sizeof(struct hfs_btree_header_rec);
+	*--rec_offsets = cpu_to_be16(offset);
+	offset += HFSPLUS_BTREE_HDR_USER_BYTES;
+	*--rec_offsets = cpu_to_be16(offset);
+
+	hdr_node_map_rec_bits = 8 * (node_size - offset - (4 * sizeof(u16)));
+	if (be32_to_cpu(head->node_count) > hdr_node_map_rec_bits) {
+		u32 map_node_bits;
+		u32 map_nodes;
+
+		desc->next = cpu_to_be32(be32_to_cpu(head->leaf_tail) + 1);
+		map_node_bits = 8 * (node_size - sizeof(struct hfs_bnode_desc) -
+					(2 * sizeof(u16)) - 2);
+		map_nodes = (be32_to_cpu(head->node_count) -
+				hdr_node_map_rec_bits +
+				(map_node_bits - 1)) / map_node_bits;
+		be32_add_cpu(&head->free_nodes, 0 - map_nodes);
+	}
+
+	bmp = buf + offset;
+	used_nodes =
+		be32_to_cpu(head->node_count) - be32_to_cpu(head->free_nodes);
+	used_bmp_bytes = used_nodes / 8;
+	if (used_bmp_bytes) {
+		memset(bmp, 0xFF, used_bmp_bytes);
+		bmp += used_bmp_bytes;
+		used_nodes %= 8;
+	}
+	*bmp = ~(0xFF >> used_nodes);
+	offset += hdr_node_map_rec_bits / 8;
+	*--rec_offsets = cpu_to_be16(offset);
+}
+
+static int hfsplus_create_attributes_file(struct super_block *sb)
+{
+	int err = 0;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+	struct inode *attr_file;
+	struct hfsplus_inode_info *hip;
+	u32 clump_size;
+	u16 node_size = HFSPLUS_ATTR_TREE_NODE_SIZE;
+	char *buf;
+	int index, written;
+	struct address_space *mapping;
+	struct page *page;
+	int old_state = HFSPLUS_EMPTY_ATTR_TREE;
+
+	hfs_dbg(ATTR_MOD, "create_attr_file: ino %d\n", HFSPLUS_ATTR_CNID);
+
+check_attr_tree_state_again:
+	switch (atomic_read(&sbi->attr_tree_state)) {
+	case HFSPLUS_EMPTY_ATTR_TREE:
+		if (old_state != atomic_cmpxchg(&sbi->attr_tree_state,
+						old_state,
+						HFSPLUS_CREATING_ATTR_TREE))
+			goto check_attr_tree_state_again;
+		break;
+	case HFSPLUS_CREATING_ATTR_TREE:
+		/*
+		 * This state means that another thread is in the process
+		 * of creating the AttributesFile. In theory we can get
+		 * here, but in practice __setxattr() first calls
+		 * hfs_find_init() to look up the CatalogFile's B-tree,
+		 * which takes that B-tree's mutex. As a result, while one
+		 * thread creates the AttributesFile, the others wait on
+		 * the CatalogFile B-tree mutex. Should the code change,
+		 * we would return -EAGAIN from here, meaning that a first
+		 * attempt to set an xattr fails but a second one succeeds.
+		 */
+		return -EAGAIN;
+	case HFSPLUS_VALID_ATTR_TREE:
+		return 0;
+	case HFSPLUS_FAILED_ATTR_TREE:
+		return -EOPNOTSUPP;
+	default:
+		BUG();
+	}
+
+	attr_file = hfsplus_iget(sb, HFSPLUS_ATTR_CNID);
+	if (IS_ERR(attr_file)) {
+		pr_err("failed to load attributes file\n");
+		return PTR_ERR(attr_file);
+	}
+
+	BUG_ON(i_size_read(attr_file) != 0);
+
+	hip = HFSPLUS_I(attr_file);
+
+	clump_size = hfsplus_calc_btree_clump_size(sb->s_blocksize,
+						   node_size,
+						   sbi->sect_count,
+						   HFSPLUS_ATTR_CNID);
+
+	mutex_lock(&hip->extents_lock);
+	hip->clump_blocks = clump_size >> sbi->alloc_blksz_shift;
+	mutex_unlock(&hip->extents_lock);
+
+	if (sbi->free_blocks <= (hip->clump_blocks << 1)) {
+		err = -ENOSPC;
+		goto end_attr_file_creation;
+	}
+
+	while (hip->alloc_blocks < hip->clump_blocks) {
+		err = hfsplus_file_extend(attr_file);
+		if (unlikely(err)) {
+			pr_err("failed to extend attributes file\n");
+			goto end_attr_file_creation;
+		}
+		hip->phys_size = attr_file->i_size =
+			(loff_t)hip->alloc_blocks << sbi->alloc_blksz_shift;
+		hip->fs_blocks = hip->alloc_blocks << sbi->fs_shift;
+		inode_set_bytes(attr_file, attr_file->i_size);
+	}
+
+	buf = kzalloc(node_size, GFP_NOFS);
+	if (!buf) {
+		pr_err("failed to allocate memory for header node\n");
+		err = -ENOMEM;
+		goto end_attr_file_creation;
+	}
+
+	hfsplus_init_header_node(attr_file, clump_size, buf, node_size);
+
+	mapping = attr_file->i_mapping;
+
+	index = 0;
+	written = 0;
+	for (; written < node_size; index++, written += PAGE_CACHE_SIZE) {
+		void *kaddr;
+
+		page = read_mapping_page(mapping, index, NULL);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			goto failed_header_node_init;
+		}
+
+		kaddr = kmap_atomic(page);
+		memcpy(kaddr, buf + written,
+			min_t(size_t, PAGE_CACHE_SIZE, node_size - written));
+		kunmap_atomic(kaddr);
+
+		set_page_dirty(page);
+		page_cache_release(page);
+	}
+
+	hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
+
+	sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
+	if (!sbi->attr_tree)
+		pr_err("failed to load attributes file\n");
+
+failed_header_node_init:
+	kfree(buf);
+
+end_attr_file_creation:
+	iput(attr_file);
+
+	if (!err)
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
+	else if (err == -ENOSPC)
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
+	else
+		atomic_set(&sbi->attr_tree_state, HFSPLUS_FAILED_ATTR_TREE);
+
+	return err;
+}
+
 int __hfsplus_setxattr(struct inode *inode, const char *name,
 		       const void *value, size_t size, int flags)
 {
@@ -211,8 +413,9 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
 	}
 
 	if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
-		err = -EOPNOTSUPP;
-		goto end_setxattr;
+		err = hfsplus_create_attributes_file(inode->i_sb);
+		if (unlikely(err))
+			goto end_setxattr;
 	}
 
 	if (hfsplus_attr_exists(inode, name)) {
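
The attr_tree_state machine above is a one-shot initialization guard: atomic_cmpxchg() lets exactly one thread take the EMPTY -> CREATING transition, and every other thread either backs off with -EAGAIN or observes the final VALID/FAILED state. A stripped-down sketch of the same claim-then-publish pattern (names illustrative):

    #include <linux/atomic.h>
    #include <linux/errno.h>

    enum { ST_EMPTY, ST_CREATING, ST_VALID, ST_FAILED };

    static atomic_t state = ATOMIC_INIT(ST_EMPTY);

    static int ensure_created(void)
    {
    retry:
    	switch (atomic_read(&state)) {
    	case ST_EMPTY:
    		/* only one thread wins this transition */
    		if (atomic_cmpxchg(&state, ST_EMPTY, ST_CREATING) != ST_EMPTY)
    			goto retry;
    		break;
    	case ST_CREATING:
    		return -EAGAIN;		/* someone else is building it */
    	case ST_VALID:
    		return 0;
    	case ST_FAILED:
    		return -EOPNOTSUPP;
    	}

    	/* ... do the expensive one-time setup here ... */

    	atomic_set(&state, ST_VALID);	/* publish the result */
    	return 0;
    }
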
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 17e6bdde96c5..dc7411fe185d 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1025,7 +1025,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
 	for(i = count;  i < (num_got + count); i++) {
 		bhs[i] = sb_getblk(osb->sb, first_blkno);
 		if (bhs[i] == NULL) {
-			status = -EIO;
+			status = -ENOMEM;
 			mlog_errno(status);
 			goto bail;
 		}
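
This -EIO to -ENOMEM conversion repeats throughout the ocfs2 hunks below: sb_getblk() returns NULL only when it cannot allocate a buffer head, so the failure is a memory shortage, not a disk error. The idiom the series standardizes on (fragment for illustration):

    bh = sb_getblk(sb, blkno);
    if (bh == NULL) {
    	/* allocation failure, not an I/O error */
    	status = -ENOMEM;
    	mlog_errno(status);
    	goto bail;
    }
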
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f37d3c0e2053..aeb44e879c51 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -80,6 +80,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
 
 	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
 						    le32_to_cpu(fe->i_clusters))) {
+		err = -ENOMEM;
 		mlog(ML_ERROR, "block offset is outside the allocated size: "
 		     "%llu\n", (unsigned long long)iblock);
 		goto bail;
@@ -92,6 +93,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
 		iblock;
 	buffer_cache_bh = sb_getblk(osb->sb, blkno);
 	if (!buffer_cache_bh) {
+		err = -ENOMEM;
 		mlog(ML_ERROR, "couldn't getblock for symlink!\n");
 		goto bail;
 	}
@@ -592,26 +594,11 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 	ocfs2_rw_unlock(inode, level);
 }
 
-/*
- * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
- * from ext3.  PageChecked() bits have been removed as OCFS2 does not
- * do journalled data.
- */
-static void ocfs2_invalidatepage(struct page *page, unsigned int offset,
-				 unsigned int length)
-{
-	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
-
-	jbd2_journal_invalidatepage(journal, page, offset, length);
-}
-
 static int ocfs2_releasepage(struct page *page, gfp_t wait)
 {
-	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
-
 	if (!page_has_buffers(page))
 		return 0;
-	return jbd2_journal_try_to_free_buffers(journal, page, wait);
+	return try_to_free_buffers(page);
 }
 
 static ssize_t ocfs2_direct_IO(int rw,
@@ -1802,8 +1789,7 @@ try_again:
 		data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
 
 		credits = ocfs2_calc_extend_credits(inode->i_sb,
-						    &di->id2.i_list,
-						    clusters_to_alloc);
+						    &di->id2.i_list);
 
 	}
 
@@ -1897,10 +1883,14 @@ out_commit:
 out:
 	ocfs2_free_write_ctxt(wc);
 
-	if (data_ac)
+	if (data_ac) {
 		ocfs2_free_alloc_context(data_ac);
-	if (meta_ac)
+		data_ac = NULL;
+	}
+	if (meta_ac) {
 		ocfs2_free_alloc_context(meta_ac);
+		meta_ac = NULL;
+	}
 
 	if (ret == -ENOSPC && try_free) {
 		/*
@@ -2087,7 +2077,7 @@ const struct address_space_operations ocfs2_aops = {
 	.write_end		= ocfs2_write_end,
 	.bmap			= ocfs2_bmap,
 	.direct_IO		= ocfs2_direct_IO,
-	.invalidatepage		= ocfs2_invalidatepage,
+	.invalidatepage		= block_invalidatepage,
 	.releasepage		= ocfs2_releasepage,
 	.migratepage		= buffer_migrate_page,
 	.is_partially_uptodate	= block_is_partially_uptodate,
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 5d18ad10c27f..5b704c63a103 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -115,7 +115,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 		if (bhs[i] == NULL) {
 			bhs[i] = sb_getblk(osb->sb, block++);
 			if (bhs[i] == NULL) {
-				status = -EIO;
+				status = -ENOMEM;
 				mlog_errno(status);
 				goto bail;
 			}
@@ -214,7 +214,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
 			bhs[i] = sb_getblk(sb, block++);
 			if (bhs[i] == NULL) {
 				ocfs2_metadata_cache_io_unlock(ci);
-				status = -EIO;
+				status = -ENOMEM;
 				mlog_errno(status);
 				goto bail;
 			}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 363f0dcc924f..73920ffda05b 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -35,6 +35,7 @@
 #include <linux/time.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/bitmap.h>
 
 #include "heartbeat.h"
 #include "tcp.h"
@@ -282,15 +283,6 @@ struct o2hb_bio_wait_ctxt {
 	int               wc_error;
 };
 
-static int o2hb_pop_count(void *map, int count)
-{
-	int i = -1, pop = 0;
-
-	while ((i = find_next_bit(map, count, i + 1)) < count)
-		pop++;
-	return pop;
-}
-
 static void o2hb_write_timeout(struct work_struct *work)
 {
 	int failed, quorum;
@@ -307,9 +299,9 @@ static void o2hb_write_timeout(struct work_struct *work)
 	spin_lock_irqsave(&o2hb_live_lock, flags);
 	if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
 		set_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
-	failed = o2hb_pop_count(&o2hb_failed_region_bitmap,
-				O2NM_MAX_REGIONS);
-	quorum = o2hb_pop_count(&o2hb_quorum_region_bitmap,
-				O2NM_MAX_REGIONS);
+	failed = bitmap_weight(o2hb_failed_region_bitmap,
+				O2NM_MAX_REGIONS);
+	quorum = bitmap_weight(o2hb_quorum_region_bitmap,
+				O2NM_MAX_REGIONS);
 	spin_unlock_irqrestore(&o2hb_live_lock, flags);
 
@@ -765,7 +757,7 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg)
 	 * If global heartbeat active, unpin all regions if the
 	 * region count > CUT_OFF
 	 */
-	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+	if (bitmap_weight(o2hb_quorum_region_bitmap,
 			   O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
 		o2hb_region_unpin(NULL);
 unlock:
@@ -954,23 +946,9 @@ out:
 	return changed;
 }
 
-/* This could be faster if we just implemented a find_last_bit, but I
- * don't think the circumstances warrant it. */
-static int o2hb_highest_node(unsigned long *nodes,
-			     int numbits)
+static int o2hb_highest_node(unsigned long *nodes, int numbits)
 {
-	int highest, node;
-
-	highest = numbits;
-	node = -1;
-	while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
-		if (node >= numbits)
-			break;
-
-		highest = node;
-	}
-
-	return highest;
+	return find_last_bit(nodes, numbits);
 }
 
 static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
@@ -1829,7 +1807,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
 		live_threshold = O2HB_LIVE_THRESHOLD;
 		if (o2hb_global_heartbeat_active()) {
 			spin_lock(&o2hb_live_lock);
-			if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
+			if (bitmap_weight(o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
 				live_threshold <<= 1;
 			spin_unlock(&o2hb_live_lock);
 		}
@@ -2180,7 +2158,7 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
 	if (!o2hb_dependent_users)
 		goto unlock;
 
-	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+	if (bitmap_weight(o2hb_quorum_region_bitmap,
 			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
 		o2hb_region_pin(NULL);
 
@@ -2480,7 +2458,7 @@ static int o2hb_region_inc_user(const char *region_uuid)
 	if (o2hb_dependent_users > 1)
 		goto unlock;
 
-	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+	if (bitmap_weight(o2hb_quorum_region_bitmap,
 			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
 		ret = o2hb_region_pin(NULL);
 
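
Both heartbeat cleanups swap open-coded loops for lib/bitmap helpers: bitmap_weight() counts set bits (what o2hb_pop_count() computed by hand), and find_last_bit() returns the highest set bit, or the bitmap size when none is set, which matches the old function's "highest = numbits" default. A small kernel-context sketch (names illustrative):

    #include <linux/bitmap.h>
    #include <linux/bug.h>

    static void bitmap_demo(void)
    {
    	DECLARE_BITMAP(regions, 32);

    	bitmap_zero(regions, 32);
    	set_bit(3, regions);
    	set_bit(17, regions);

    	/* two bits set: what o2hb_pop_count() used to count by iterating */
    	WARN_ON(bitmap_weight(regions, 32) != 2);

    	/* highest set bit; an empty bitmap yields 32 (the size) */
    	WARN_ON(find_last_bit(regions, 32) != 17);
    }
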
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index baa2b9ef7eef..2260fb9e6508 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -199,7 +199,8 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
 #define mlog_errno(st) do {						\
 	int _st = (st);							\
 	if (_st != -ERESTARTSYS && _st != -EINTR &&			\
-	    _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC)		\
+	    _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC &&		\
+	    _st != -EDQUOT)						\
 		mlog(ML_ERROR, "status = %lld\n", (long long)_st);	\
 } while (0)
 
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 30544ce8e9f7..91a7e85ac8fd 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2349,7 +2349,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
 
 	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
 	if (dx_root_bh == NULL) {
-		ret = -EIO;
+		ret = -ENOMEM;
 		goto out;
 	}
 	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
@@ -2422,7 +2422,7 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
 	for (i = 0; i < num_dx_leaves; i++) {
 		bh = sb_getblk(osb->sb, start_blk + i);
 		if (bh == NULL) {
-			ret = -EIO;
+			ret = -ENOMEM;
 			goto out;
 		}
 		dx_leaves[i] = bh;
@@ -2929,7 +2929,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
 	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
 	dirdata_bh = sb_getblk(sb, blkno);
 	if (!dirdata_bh) {
-		ret = -EIO;
+		ret = -ENOMEM;
 		mlog_errno(ret);
 		goto out_commit;
 	}
@@ -3159,7 +3159,7 @@ static int ocfs2_do_extend_dir(struct super_block *sb,
 
 	*new_bh = sb_getblk(sb, p_blkno);
 	if (!*new_bh) {
-		status = -EIO;
+		status = -ENOMEM;
 		mlog_errno(status);
 		goto bail;
 	}
@@ -3284,7 +3284,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
 		if (ocfs2_dir_resv_allowed(osb))
 			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
 
-		credits = ocfs2_calc_extend_credits(sb, el, 1);
+		credits = ocfs2_calc_extend_credits(sb, el);
 	} else {
 		spin_unlock(&OCFS2_I(dir)->ip_lock);
 		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
@@ -3716,7 +3716,7 @@ static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
 {
 	int credits = ocfs2_clusters_to_blocks(osb->sb, 2);
 
-	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list, 1);
+	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
 	credits += ocfs2_quota_trans_credits(osb->sb);
 	return credits;
 }
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index cf0f103963b1..af3f7aa73e13 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1885,8 +1885,10 @@ ok:
 			 * up nodes that this node contacted */
 			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
 						    nn+1)) < O2NM_MAX_NODES) {
-				if (nn != dlm->node_num && nn != assert->node_idx)
+				if (nn != dlm->node_num && nn != assert->node_idx) {
 					master_request = 1;
+					break;
+				}
 			}
 		}
 		mle->master = assert->node_idx;
@@ -2354,6 +2356,10 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
 
 	assert_spin_locked(&res->spinlock);
 
+	/* delay migration when the lockres is in MIGRATING state */
+	if (res->state & DLM_LOCK_RES_MIGRATING)
+		return 0;
+
 	if (res->owner != dlm->node_num)
 		return 0;
 
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 0b5adca1b178..7035af09cc03 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1886,6 +1886,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
 			if (ml->type == LKM_NLMODE)
 				goto skip_lvb;
 
+			/*
+			 * If the lock is in the blocked list it can't have a
+			 * valid lvb, so skip it
+			 */
+			if (ml->list == DLM_BLOCKED_LIST)
+				goto skip_lvb;
+
 			if (!dlm_lvb_is_empty(mres->lvb)) {
 				if (lksb->flags & DLM_LKSB_PUT_LVB) {
 					/* other node was trying to update
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d71903c6068b..6fff128cad16 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -580,7 +580,7 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
 	int did_quota = 0;
 
 	/*
-	 * This function only exists for file systems which don't
+	 * Unwritten extent only exists for file systems which
 	 * support holes.
 	 */
 	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
@@ -603,8 +603,7 @@ restart_all:
 		goto leave;
 	}
 
-	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
-					    clusters_to_add);
+	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
 	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 0b479bab3671..9ff4e8cf9d97 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -524,8 +524,7 @@ static inline int ocfs2_calc_dxi_expand_credits(struct super_block *sb)
524 * the result may be wrong. 524 * the result may be wrong.
525 */ 525 */
526static inline int ocfs2_calc_extend_credits(struct super_block *sb, 526static inline int ocfs2_calc_extend_credits(struct super_block *sb,
527 struct ocfs2_extent_list *root_el, 527 struct ocfs2_extent_list *root_el)
528 u32 bits_wanted)
529{ 528{
530 int bitmap_blocks, sysfile_bitmap_blocks, extent_blocks; 529 int bitmap_blocks, sysfile_bitmap_blocks, extent_blocks;
531 530
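
With the shortened signature, ocfs2_calc_extend_credits() no longer takes bits_wanted; the worst-case estimate it returns does not depend on how many bits the caller wanted, which is why every call site above and below simply drops the argument. A rough userspace sketch of such a sum, with placeholder counts rather than ocfs2's real arithmetic:

    #include <stdio.h>

    struct extent_list { int l_tree_depth; };

    static int calc_extend_credits(const struct extent_list *root_el)
    {
        int bitmap_blocks = 2;                      /* global bitmap updates  */
        int sysfile_bitmap_blocks = 1;              /* sysfile bitmap updates */
        int extent_blocks = root_el->l_tree_depth;  /* one per tree level     */

        return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + 1;
    }

    int main(void)
    {
        struct extent_list el = { .l_tree_depth = 2 };
        printf("credits = %d\n", calc_extend_credits(&el));
        return 0;
    }
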
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 3d3f3c83065c..631a98213474 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -201,8 +201,7 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
201 } 201 }
202 } 202 }
203 203
204 *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el, 204 *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
205 clusters_to_move + 2);
206 205
207 mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n", 206 mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
208 extra_blocks, clusters_to_move, *credits); 207 extra_blocks, clusters_to_move, *credits);
@@ -1067,8 +1066,10 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
1067 if (status) 1066 if (status)
1068 return status; 1067 return status;
1069 1068
1070 if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) 1069 if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
1070 status = -EPERM;
1071 goto out_drop; 1071 goto out_drop;
1072 }
1072 1073
1073 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) { 1074 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1074 status = -EPERM; 1075 status = -EPERM;
@@ -1090,8 +1091,10 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
1090 goto out_free; 1091 goto out_free;
1091 } 1092 }
1092 1093
1093 if (range.me_start > i_size_read(inode)) 1094 if (range.me_start > i_size_read(inode)) {
1095 status = -EINVAL;
1094 goto out_free; 1096 goto out_free;
1097 }
1095 1098
1096 if (range.me_start + range.me_len > i_size_read(inode)) 1099 if (range.me_start + range.me_len > i_size_read(inode))
1097 range.me_len = i_size_read(inode) - range.me_start; 1100 range.me_len = i_size_read(inode) - range.me_start;
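
Both fixes in this ioctl follow the same rule: every early goto to the cleanup label must first set status, otherwise the caller can see a stale value (often 0, i.e. success) for a rejected request. A compact sketch of the pattern, all names hypothetical:

    #include <errno.h>
    #include <stdio.h>

    static int move_range(int is_regular, long start, long isize)
    {
        int status = 0;

        if (!is_regular) {
            status = -EPERM;        /* was missing: fell through as 0 */
            goto out;
        }
        if (start > isize) {
            status = -EINVAL;       /* was missing: fell through as 0 */
            goto out;
        }
        /* ... do the actual work ... */
    out:
        return status;
    }

    int main(void)
    {
        printf("%d %d %d\n", move_range(0, 0, 10),
               move_range(1, 20, 10), move_range(1, 0, 10));
        return 0;
    }
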
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index be3f8676a438..4f791f6d27d0 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -489,7 +489,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
489 489
490 *new_fe_bh = sb_getblk(osb->sb, fe_blkno); 490 *new_fe_bh = sb_getblk(osb->sb, fe_blkno);
491 if (!*new_fe_bh) { 491 if (!*new_fe_bh) {
492 status = -EIO; 492 status = -ENOMEM;
493 mlog_errno(status); 493 mlog_errno(status);
494 goto leave; 494 goto leave;
495 } 495 }
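
sb_getblk() returns NULL only when it cannot allocate a buffer head, not on I/O failure, so -ENOMEM is the accurate error here; the same switch recurs in the refcounttree, suballoc, super, and xattr hunks below. A hypothetical userspace analog:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buffer_head { long blocknr; };

    /* Stand-in for sb_getblk(): NULL means allocation failure. */
    static struct buffer_head *getblk_stub(long blkno)
    {
        struct buffer_head *bh = malloc(sizeof(*bh));
        if (bh)
            bh->blocknr = blkno;
        return bh;
    }

    static int new_inode_block(long blkno, struct buffer_head **out)
    {
        *out = getblk_stub(blkno);
        if (!*out)
            return -ENOMEM;   /* allocation failed; -EIO would mislead */
        return 0;
    }

    int main(void)
    {
        struct buffer_head *bh;
        int status = new_inode_block(42, &bh);
        printf("status = %d\n", status);
        free(bh);
        return 0;
    }
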
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index bf4dfc14bb2c..55767e1ba724 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -612,6 +612,11 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
612 } 612 }
613 613
614 new_bh = sb_getblk(inode->i_sb, first_blkno); 614 new_bh = sb_getblk(inode->i_sb, first_blkno);
615 if (!new_bh) {
616 ret = -ENOMEM;
617 mlog_errno(ret);
618 goto out_commit;
619 }
615 ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh); 620 ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
616 621
617 ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh, 622 ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
@@ -1310,7 +1315,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
1310 1315
1311 new_bh = sb_getblk(sb, blkno); 1316 new_bh = sb_getblk(sb, blkno);
1312 if (new_bh == NULL) { 1317 if (new_bh == NULL) {
1313 ret = -EIO; 1318 ret = -ENOMEM;
1314 mlog_errno(ret); 1319 mlog_errno(ret);
1315 goto out; 1320 goto out;
1316 } 1321 }
@@ -1561,7 +1566,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
1561 1566
1562 new_bh = sb_getblk(sb, blkno); 1567 new_bh = sb_getblk(sb, blkno);
1563 if (new_bh == NULL) { 1568 if (new_bh == NULL) {
1564 ret = -EIO; 1569 ret = -ENOMEM;
1565 mlog_errno(ret); 1570 mlog_errno(ret);
1566 goto out; 1571 goto out;
1567 } 1572 }
@@ -2502,8 +2507,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2502 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2507 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2503 *meta_add += ocfs2_extend_meta_needed(et.et_root_el); 2508 *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
2504 *credits += ocfs2_calc_extend_credits(sb, 2509 *credits += ocfs2_calc_extend_credits(sb,
2505 et.et_root_el, 2510 et.et_root_el);
2506 ref_blocks);
2507 } else { 2511 } else {
2508 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 2512 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
2509 *meta_add += 1; 2513 *meta_add += 1;
@@ -2874,8 +2878,7 @@ static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2874 meta_add = 2878 meta_add =
2875 ocfs2_extend_meta_needed(et->et_root_el); 2879 ocfs2_extend_meta_needed(et->et_root_el);
2876 2880
2877 *credits += ocfs2_calc_extend_credits(sb, et->et_root_el, 2881 *credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
2878 num_clusters + 2);
2879 2882
2880 ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh, 2883 ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
2881 p_cluster, num_clusters, 2884 p_cluster, num_clusters,
@@ -3031,7 +3034,7 @@ int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
3031 for (i = 0; i < blocks; i++, old_block++, new_block++) { 3034 for (i = 0; i < blocks; i++, old_block++, new_block++) {
3032 new_bh = sb_getblk(osb->sb, new_block); 3035 new_bh = sb_getblk(osb->sb, new_block);
3033 if (new_bh == NULL) { 3036 if (new_bh == NULL) {
3034 ret = -EIO; 3037 ret = -ENOMEM;
3035 mlog_errno(ret); 3038 mlog_errno(ret);
3036 break; 3039 break;
3037 } 3040 }
@@ -3625,8 +3628,7 @@ int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
3625 3628
3626 ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh); 3629 ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
3627 *credits += ocfs2_calc_extend_credits(inode->i_sb, 3630 *credits += ocfs2_calc_extend_credits(inode->i_sb,
3628 et.et_root_el, 3631 et.et_root_el);
3629 ref_blocks);
3630 } 3632 }
3631 3633
3632out: 3634out:
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index ec55add7604a..822ebc10f281 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -469,6 +469,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
469 struct ocfs2_chain_list *cl; 469 struct ocfs2_chain_list *cl;
470 struct ocfs2_chain_rec *cr; 470 struct ocfs2_chain_rec *cr;
471 u16 cl_bpc; 471 u16 cl_bpc;
472 u64 bg_ptr;
472 473
473 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) 474 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
474 return -EROFS; 475 return -EROFS;
@@ -513,7 +514,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
513 ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh); 514 ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
514 if (ret) { 515 if (ret) {
515 mlog_errno(ret); 516 mlog_errno(ret);
516 goto out_unlock; 517 goto out_free_group_bh;
517 } 518 }
518 519
519 trace_ocfs2_group_add((unsigned long long)input->group, 520 trace_ocfs2_group_add((unsigned long long)input->group,
@@ -523,7 +524,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
523 if (IS_ERR(handle)) { 524 if (IS_ERR(handle)) {
524 mlog_errno(PTR_ERR(handle)); 525 mlog_errno(PTR_ERR(handle));
525 ret = -EINVAL; 526 ret = -EINVAL;
526 goto out_unlock; 527 goto out_free_group_bh;
527 } 528 }
528 529
529 cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc); 530 cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
@@ -538,12 +539,14 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
538 } 539 }
539 540
540 group = (struct ocfs2_group_desc *)group_bh->b_data; 541 group = (struct ocfs2_group_desc *)group_bh->b_data;
542 bg_ptr = le64_to_cpu(group->bg_next_group);
541 group->bg_next_group = cr->c_blkno; 543 group->bg_next_group = cr->c_blkno;
542 ocfs2_journal_dirty(handle, group_bh); 544 ocfs2_journal_dirty(handle, group_bh);
543 545
544 ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode), 546 ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
545 main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE); 547 main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
546 if (ret < 0) { 548 if (ret < 0) {
549 group->bg_next_group = cpu_to_le64(bg_ptr);
547 mlog_errno(ret); 550 mlog_errno(ret);
548 goto out_commit; 551 goto out_commit;
549 } 552 }
@@ -574,8 +577,11 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
574 577
575out_commit: 578out_commit:
576 ocfs2_commit_trans(osb, handle); 579 ocfs2_commit_trans(osb, handle);
577out_unlock: 580
581out_free_group_bh:
578 brelse(group_bh); 582 brelse(group_bh);
583
584out_unlock:
579 brelse(main_bm_bh); 585 brelse(main_bm_bh);
580 586
581 ocfs2_inode_unlock(main_bm_inode, 1); 587 ocfs2_inode_unlock(main_bm_inode, 1);
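
The resize path now remembers the group's old bg_next_group in bg_ptr and restores it if the journal access fails, so an aborted group add leaves the chain as it was; the split cleanup labels keep the brelse() calls paired with the point of failure. A toy version of the save-and-restore step, with the failing call stubbed out:

    #include <errno.h>
    #include <stdio.h>

    struct group_desc { unsigned long long bg_next_group; };

    static int journal_access_stub(int should_fail)
    {
        return should_fail ? -EIO : 0;
    }

    static int link_group(struct group_desc *group,
                          unsigned long long new_blkno, int inject_failure)
    {
        unsigned long long bg_ptr = group->bg_next_group;  /* save old value */
        int ret;

        group->bg_next_group = new_blkno;
        ret = journal_access_stub(inject_failure);
        if (ret < 0) {
            group->bg_next_group = bg_ptr;                 /* roll back */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        struct group_desc g = { .bg_next_group = 100 };
        link_group(&g, 200, 1);
        printf("after failed link: %llu\n", g.bg_next_group);  /* 100 */
        return 0;
    }
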
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 39abf89697ed..cb7ec0b63ddc 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -643,7 +643,7 @@ error:
643 643
644#define FS_OCFS2_NM 1 644#define FS_OCFS2_NM 1
645 645
646static ctl_table ocfs2_nm_table[] = { 646static struct ctl_table ocfs2_nm_table[] = {
647 { 647 {
648 .procname = "hb_ctl_path", 648 .procname = "hb_ctl_path",
649 .data = ocfs2_hb_ctl_path, 649 .data = ocfs2_hb_ctl_path,
@@ -654,7 +654,7 @@ static ctl_table ocfs2_nm_table[] = {
654 { } 654 { }
655}; 655};
656 656
657static ctl_table ocfs2_mod_table[] = { 657static struct ctl_table ocfs2_mod_table[] = {
658 { 658 {
659 .procname = "nm", 659 .procname = "nm",
660 .data = NULL, 660 .data = NULL,
@@ -665,7 +665,7 @@ static ctl_table ocfs2_mod_table[] = {
665 { } 665 { }
666}; 666};
667 667
668static ctl_table ocfs2_kern_table[] = { 668static struct ctl_table ocfs2_kern_table[] = {
669 { 669 {
670 .procname = "ocfs2", 670 .procname = "ocfs2",
671 .data = NULL, 671 .data = NULL,
@@ -676,7 +676,7 @@ static ctl_table ocfs2_kern_table[] = {
676 { } 676 { }
677}; 677};
678 678
679static ctl_table ocfs2_root_table[] = { 679static struct ctl_table ocfs2_root_table[] = {
680 { 680 {
681 .procname = "fs", 681 .procname = "fs",
682 .data = NULL, 682 .data = NULL,
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 5397c07ce608..2c91452c4047 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -481,7 +481,7 @@ ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
481 481
482 bg_bh = sb_getblk(osb->sb, bg_blkno); 482 bg_bh = sb_getblk(osb->sb, bg_blkno);
483 if (!bg_bh) { 483 if (!bg_bh) {
484 status = -EIO; 484 status = -ENOMEM;
485 mlog_errno(status); 485 mlog_errno(status);
486 goto bail; 486 goto bail;
487 } 487 }
@@ -661,7 +661,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
661 661
662 bg_bh = sb_getblk(osb->sb, bg_blkno); 662 bg_bh = sb_getblk(osb->sb, bg_blkno);
663 if (!bg_bh) { 663 if (!bg_bh) {
664 status = -EIO; 664 status = -ENOMEM;
665 mlog_errno(status); 665 mlog_errno(status);
666 goto bail; 666 goto bail;
667 } 667 }
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index d4e81e4a9b04..c41492957aa5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1848,8 +1848,8 @@ static int ocfs2_get_sector(struct super_block *sb,
1848 1848
1849 *bh = sb_getblk(sb, block); 1849 *bh = sb_getblk(sb, block);
1850 if (!*bh) { 1850 if (!*bh) {
1851 mlog_errno(-EIO); 1851 mlog_errno(-ENOMEM);
1852 return -EIO; 1852 return -ENOMEM;
1853 } 1853 }
1854 lock_buffer(*bh); 1854 lock_buffer(*bh);
1855 if (!buffer_dirty(*bh)) 1855 if (!buffer_dirty(*bh))
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 6ce0686eab72..f0a1326d9bba 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -377,7 +377,7 @@ static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
377 bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb, 377 bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
378 xb_blkno + i); 378 xb_blkno + i);
379 if (!bucket->bu_bhs[i]) { 379 if (!bucket->bu_bhs[i]) {
380 rc = -EIO; 380 rc = -ENOMEM;
381 mlog_errno(rc); 381 mlog_errno(rc);
382 break; 382 break;
383 } 383 }
@@ -754,8 +754,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
754 BUG_ON(why == RESTART_META); 754 BUG_ON(why == RESTART_META);
755 755
756 credits = ocfs2_calc_extend_credits(inode->i_sb, 756 credits = ocfs2_calc_extend_credits(inode->i_sb,
757 &vb->vb_xv->xr_list, 757 &vb->vb_xv->xr_list);
758 clusters_to_add);
759 status = ocfs2_extend_trans(handle, credits); 758 status = ocfs2_extend_trans(handle, credits);
760 if (status < 0) { 759 if (status < 0) {
761 status = -ENOMEM; 760 status = -ENOMEM;
@@ -2865,6 +2864,12 @@ static int ocfs2_create_xattr_block(struct inode *inode,
2865 } 2864 }
2866 2865
2867 new_bh = sb_getblk(inode->i_sb, first_blkno); 2866 new_bh = sb_getblk(inode->i_sb, first_blkno);
2867 if (!new_bh) {
2868 ret = -ENOMEM;
2869 mlog_errno(ret);
2870 goto end;
2871 }
2872
2868 ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); 2873 ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2869 2874
2870 ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode), 2875 ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
@@ -3040,8 +3045,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
3040 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { 3045 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
3041 clusters_add += new_clusters; 3046 clusters_add += new_clusters;
3042 credits += ocfs2_calc_extend_credits(inode->i_sb, 3047 credits += ocfs2_calc_extend_credits(inode->i_sb,
3043 &def_xv.xv.xr_list, 3048 &def_xv.xv.xr_list);
3044 new_clusters);
3045 } 3049 }
3046 3050
3047 goto meta_guess; 3051 goto meta_guess;
@@ -3106,8 +3110,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
3106 if (!ocfs2_xattr_is_local(xe)) 3110 if (!ocfs2_xattr_is_local(xe))
3107 credits += ocfs2_calc_extend_credits( 3111 credits += ocfs2_calc_extend_credits(
3108 inode->i_sb, 3112 inode->i_sb,
3109 &def_xv.xv.xr_list, 3113 &def_xv.xv.xr_list);
3110 new_clusters);
3111 goto out; 3114 goto out;
3112 } 3115 }
3113 } 3116 }
@@ -3132,9 +3135,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
3132 meta_add += ocfs2_extend_meta_needed(&xv->xr_list); 3135 meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
3133 clusters_add += new_clusters - old_clusters; 3136 clusters_add += new_clusters - old_clusters;
3134 credits += ocfs2_calc_extend_credits(inode->i_sb, 3137 credits += ocfs2_calc_extend_credits(inode->i_sb,
3135 &xv->xr_list, 3138 &xv->xr_list);
3136 new_clusters -
3137 old_clusters);
3138 if (value_size >= OCFS2_XATTR_ROOT_SIZE) 3139 if (value_size >= OCFS2_XATTR_ROOT_SIZE)
3139 goto out; 3140 goto out;
3140 } 3141 }
@@ -3180,7 +3181,7 @@ meta_guess:
3180 &xb->xb_attrs.xb_root.xt_list; 3181 &xb->xb_attrs.xb_root.xt_list;
3181 meta_add += ocfs2_extend_meta_needed(el); 3182 meta_add += ocfs2_extend_meta_needed(el);
3182 credits += ocfs2_calc_extend_credits(inode->i_sb, 3183 credits += ocfs2_calc_extend_credits(inode->i_sb,
3183 el, 1); 3184 el);
3184 } else 3185 } else
3185 credits += OCFS2_SUBALLOC_ALLOC + 1; 3186 credits += OCFS2_SUBALLOC_ALLOC + 1;
3186 3187
@@ -6216,8 +6217,7 @@ static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
6216 le16_to_cpu(xv->xr_list.l_next_free_rec); 6217 le16_to_cpu(xv->xr_list.l_next_free_rec);
6217 6218
6218 *credits += ocfs2_calc_extend_credits(sb, 6219 *credits += ocfs2_calc_extend_credits(sb,
6219 &def_xv.xv.xr_list, 6220 &def_xv.xv.xr_list);
6220 le32_to_cpu(xv->xr_clusters));
6221 6221
6222 /* 6222 /*
6223 * If the value is a tree with depth > 1, We don't go deep 6223 * If the value is a tree with depth > 1, We don't go deep
@@ -6782,7 +6782,7 @@ static int ocfs2_lock_reflink_xattr_rec_allocators(
6782 metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el); 6782 metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
6783 6783
6784 *credits += ocfs2_calc_extend_credits(osb->sb, 6784 *credits += ocfs2_calc_extend_credits(osb->sb,
6785 xt_et->et_root_el, len); 6785 xt_et->et_root_el);
6786 6786
6787 if (metas.num_metas) { 6787 if (metas.num_metas) {
6788 ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas, 6788 ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 15af6222f8a4..2183fcf41d59 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -31,6 +31,10 @@ config PROC_FS
31config PROC_KCORE 31config PROC_KCORE
32 bool "/proc/kcore support" if !ARM 32 bool "/proc/kcore support" if !ARM
33 depends on PROC_FS && MMU 33 depends on PROC_FS && MMU
34 help
35 Provides a virtual ELF core file of the live kernel. This can
36 be read with gdb and other ELF tools. No modifications can be
37 made using this mechanism.
34 38
35config PROC_VMCORE 39config PROC_VMCORE
36 bool "/proc/vmcore support" 40 bool "/proc/vmcore support"
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 8eaa1ba793fc..28955d4b7218 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -285,19 +285,23 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
285 return rv; 285 return rv;
286} 286}
287 287
288static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) 288static unsigned long
289proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
290 unsigned long len, unsigned long pgoff,
291 unsigned long flags)
289{ 292{
290 struct proc_dir_entry *pde = PDE(file_inode(file)); 293 struct proc_dir_entry *pde = PDE(file_inode(file));
291 unsigned long rv = -EIO; 294 unsigned long rv = -EIO;
292 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL; 295 unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
296 unsigned long, unsigned long) = NULL;
293 if (use_pde(pde)) { 297 if (use_pde(pde)) {
294#ifdef CONFIG_MMU 298#ifdef CONFIG_MMU
295 get_unmapped_area = current->mm->get_unmapped_area; 299 get_area = current->mm->get_unmapped_area;
296#endif 300#endif
297 if (pde->proc_fops->get_unmapped_area) 301 if (pde->proc_fops->get_unmapped_area)
298 get_unmapped_area = pde->proc_fops->get_unmapped_area; 302 get_area = pde->proc_fops->get_unmapped_area;
299 if (get_unmapped_area) 303 if (get_area)
300 rv = get_unmapped_area(file, orig_addr, len, pgoff, flags); 304 rv = get_area(file, orig_addr, len, pgoff, flags);
301 unuse_pde(pde); 305 unuse_pde(pde);
302 } 306 }
303 return rv; 307 return rv;
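
The refactor keeps proc's fallback scheme, just with shorter names: start from the mm's default get_unmapped_area (left NULL on !MMU), let the entry's own hook override it, and fail with -EIO if neither exists. A userspace sketch of that dispatch, types invented:

    #include <stdio.h>

    typedef long (*area_fn)(long addr, long len);

    static long default_area(long addr, long len) { return addr; }
    static long custom_area(long addr, long len)  { return addr + len; }

    struct entry { area_fn get_unmapped_area; };

    static long get_area_for(const struct entry *e, long addr, long len)
    {
        area_fn get_area = default_area;   /* kernel leaves this NULL on !MMU */

        if (e->get_unmapped_area)
            get_area = e->get_unmapped_area;
        return get_area ? get_area(addr, len) : -5 /* -EIO */;
    }

    int main(void)
    {
        struct entry plain = { 0 }, special = { custom_area };
        printf("%ld %ld\n", get_area_for(&plain, 100, 8),
               get_area_for(&special, 100, 8));
        return 0;
    }
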
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 06ea155e1a59..5ed0e52d6aa0 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -255,8 +255,7 @@ static int kcore_update_ram(void)
255 end_pfn = 0; 255 end_pfn = 0;
256 for_each_node_state(nid, N_MEMORY) { 256 for_each_node_state(nid, N_MEMORY) {
257 unsigned long node_end; 257 unsigned long node_end;
258 node_end = NODE_DATA(nid)->node_start_pfn + 258 node_end = node_end_pfn(nid);
259 NODE_DATA(nid)->node_spanned_pages;
260 if (end_pfn < node_end) 259 if (end_pfn < node_end)
261 end_pfn = node_end; 260 end_pfn = node_end;
262 } 261 }
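
node_end_pfn() hides the sum the old code spelled out: a node's end pfn is its start pfn plus its spanned pages. Sketched with a toy node descriptor:

    #include <stdio.h>

    struct node_data { unsigned long node_start_pfn, node_spanned_pages; };

    static unsigned long node_end_pfn_stub(const struct node_data *n)
    {
        return n->node_start_pfn + n->node_spanned_pages;
    }

    int main(void)
    {
        struct node_data n = { .node_start_pfn = 0x1000,
                               .node_spanned_pages = 0x8000 };
        printf("node end pfn = %#lx\n", node_end_pfn_stub(&n)); /* 0x9000 */
        return 0;
    }
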
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 59d85d608898..c805d5b69ba1 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -24,7 +24,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
24{ 24{
25 struct sysinfo i; 25 struct sysinfo i;
26 unsigned long committed; 26 unsigned long committed;
27 unsigned long allowed;
28 struct vmalloc_info vmi; 27 struct vmalloc_info vmi;
29 long cached; 28 long cached;
30 unsigned long pages[NR_LRU_LISTS]; 29 unsigned long pages[NR_LRU_LISTS];
@@ -37,8 +36,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
37 si_meminfo(&i); 36 si_meminfo(&i);
38 si_swapinfo(&i); 37 si_swapinfo(&i);
39 committed = percpu_counter_read_positive(&vm_committed_as); 38 committed = percpu_counter_read_positive(&vm_committed_as);
40 allowed = ((totalram_pages - hugetlb_total_pages())
41 * sysctl_overcommit_ratio / 100) + total_swap_pages;
42 39
43 cached = global_page_state(NR_FILE_PAGES) - 40 cached = global_page_state(NR_FILE_PAGES) -
44 total_swapcache_pages() - i.bufferram; 41 total_swapcache_pages() - i.bufferram;
@@ -147,7 +144,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
147 K(global_page_state(NR_UNSTABLE_NFS)), 144 K(global_page_state(NR_UNSTABLE_NFS)),
148 K(global_page_state(NR_BOUNCE)), 145 K(global_page_state(NR_BOUNCE)),
149 K(global_page_state(NR_WRITEBACK_TEMP)), 146 K(global_page_state(NR_WRITEBACK_TEMP)),
150 K(allowed), 147 K(vm_commit_limit()),
151 K(committed), 148 K(committed),
152 (unsigned long)VMALLOC_TOTAL >> 10, 149 (unsigned long)VMALLOC_TOTAL >> 10,
153 vmi.used >> 10, 150 vmi.used >> 10,
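
The deleted lines computed the overcommit ceiling inline; vm_commit_limit() centralizes the same formula, limit = (totalram - hugetlb) * overcommit_ratio / 100 + total_swap. A worked example with made-up page counts:

    #include <stdio.h>

    static unsigned long commit_limit(unsigned long totalram_pages,
                                      unsigned long hugetlb_pages,
                                      unsigned long overcommit_ratio,
                                      unsigned long total_swap_pages)
    {
        return (totalram_pages - hugetlb_pages) * overcommit_ratio / 100
               + total_swap_pages;
    }

    int main(void)
    {
        /* 4 GiB RAM, 512 MiB hugetlb, ratio 50, 2 GiB swap (4 KiB pages) */
        unsigned long limit = commit_limit(1048576, 131072, 50, 524288);
        printf("CommitLimit = %lu pages (%lu MiB)\n", limit, limit * 4 / 1024);
        return 0;
    }
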
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 390bdab01c3c..abbe825d20ff 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -561,6 +561,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
561 [ilog2(VM_NONLINEAR)] = "nl", 561 [ilog2(VM_NONLINEAR)] = "nl",
562 [ilog2(VM_ARCH_1)] = "ar", 562 [ilog2(VM_ARCH_1)] = "ar",
563 [ilog2(VM_DONTDUMP)] = "dd", 563 [ilog2(VM_DONTDUMP)] = "dd",
564#ifdef CONFIG_MEM_SOFT_DIRTY
565 [ilog2(VM_SOFTDIRTY)] = "sd",
566#endif
564 [ilog2(VM_MIXEDMAP)] = "mm", 567 [ilog2(VM_MIXEDMAP)] = "mm",
565 [ilog2(VM_HUGEPAGE)] = "hg", 568 [ilog2(VM_HUGEPAGE)] = "hg",
566 [ilog2(VM_NOHUGEPAGE)] = "nh", 569 [ilog2(VM_NOHUGEPAGE)] = "nh",
@@ -1387,8 +1390,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1387 struct mm_struct *mm = vma->vm_mm; 1390 struct mm_struct *mm = vma->vm_mm;
1388 struct mm_walk walk = {}; 1391 struct mm_walk walk = {};
1389 struct mempolicy *pol; 1392 struct mempolicy *pol;
1390 int n; 1393 char buffer[64];
1391 char buffer[50]; 1394 int nid;
1392 1395
1393 if (!mm) 1396 if (!mm)
1394 return 0; 1397 return 0;
@@ -1404,10 +1407,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1404 walk.mm = mm; 1407 walk.mm = mm;
1405 1408
1406 pol = get_vma_policy(task, vma, vma->vm_start); 1409 pol = get_vma_policy(task, vma, vma->vm_start);
1407 n = mpol_to_str(buffer, sizeof(buffer), pol); 1410 mpol_to_str(buffer, sizeof(buffer), pol);
1408 mpol_cond_put(pol); 1411 mpol_cond_put(pol);
1409 if (n < 0)
1410 return n;
1411 1412
1412 seq_printf(m, "%08lx %s", vma->vm_start, buffer); 1413 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1413 1414
@@ -1460,9 +1461,9 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1460 if (md->writeback) 1461 if (md->writeback)
1461 seq_printf(m, " writeback=%lu", md->writeback); 1462 seq_printf(m, " writeback=%lu", md->writeback);
1462 1463
1463 for_each_node_state(n, N_MEMORY) 1464 for_each_node_state(nid, N_MEMORY)
1464 if (md->node[n]) 1465 if (md->node[nid])
1465 seq_printf(m, " N%d=%lu", n, md->node[n]); 1466 seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1466out: 1467out:
1467 seq_putc(m, '\n'); 1468 seq_putc(m, '\n');
1468 1469
diff --git a/fs/sync.c b/fs/sync.c
index 6c0ca3b75758..f15537452231 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -27,10 +27,11 @@
27 * wait == 1 case since in that case write_inode() functions do 27 * wait == 1 case since in that case write_inode() functions do
28 * sync_dirty_buffer() and thus effectively write one block at a time. 28 * sync_dirty_buffer() and thus effectively write one block at a time.
29 */ 29 */
30static int __sync_filesystem(struct super_block *sb, int wait) 30static int __sync_filesystem(struct super_block *sb, int wait,
31 unsigned long start)
31{ 32{
32 if (wait) 33 if (wait)
33 sync_inodes_sb(sb); 34 sync_inodes_sb(sb, start);
34 else 35 else
35 writeback_inodes_sb(sb, WB_REASON_SYNC); 36 writeback_inodes_sb(sb, WB_REASON_SYNC);
36 37
@@ -47,6 +48,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
47int sync_filesystem(struct super_block *sb) 48int sync_filesystem(struct super_block *sb)
48{ 49{
49 int ret; 50 int ret;
51 unsigned long start = jiffies;
50 52
51 /* 53 /*
52 * We need to be protected against the filesystem going from 54 * We need to be protected against the filesystem going from
@@ -60,17 +62,17 @@ int sync_filesystem(struct super_block *sb)
60 if (sb->s_flags & MS_RDONLY) 62 if (sb->s_flags & MS_RDONLY)
61 return 0; 63 return 0;
62 64
63 ret = __sync_filesystem(sb, 0); 65 ret = __sync_filesystem(sb, 0, start);
64 if (ret < 0) 66 if (ret < 0)
65 return ret; 67 return ret;
66 return __sync_filesystem(sb, 1); 68 return __sync_filesystem(sb, 1, start);
67} 69}
68EXPORT_SYMBOL_GPL(sync_filesystem); 70EXPORT_SYMBOL_GPL(sync_filesystem);
69 71
70static void sync_inodes_one_sb(struct super_block *sb, void *arg) 72static void sync_inodes_one_sb(struct super_block *sb, void *arg)
71{ 73{
72 if (!(sb->s_flags & MS_RDONLY)) 74 if (!(sb->s_flags & MS_RDONLY))
73 sync_inodes_sb(sb); 75 sync_inodes_sb(sb, *((unsigned long *)arg));
74} 76}
75 77
76static void sync_fs_one_sb(struct super_block *sb, void *arg) 78static void sync_fs_one_sb(struct super_block *sb, void *arg)
@@ -102,9 +104,10 @@ static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
102SYSCALL_DEFINE0(sync) 104SYSCALL_DEFINE0(sync)
103{ 105{
104 int nowait = 0, wait = 1; 106 int nowait = 0, wait = 1;
107 unsigned long start = jiffies;
105 108
106 wakeup_flusher_threads(0, WB_REASON_SYNC); 109 wakeup_flusher_threads(0, WB_REASON_SYNC);
107 iterate_supers(sync_inodes_one_sb, NULL); 110 iterate_supers(sync_inodes_one_sb, &start);
108 iterate_supers(sync_fs_one_sb, &nowait); 111 iterate_supers(sync_fs_one_sb, &nowait);
109 iterate_supers(sync_fs_one_sb, &wait); 112 iterate_supers(sync_fs_one_sb, &wait);
110 iterate_bdevs(fdatawrite_one_bdev, NULL); 113 iterate_bdevs(fdatawrite_one_bdev, NULL);
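
Threading start (the jiffies value at sync entry) down to sync_inodes_sb() lets writeback ignore inodes dirtied after sync(2) began, so a workload that keeps dirtying pages can no longer stall the syncing task indefinitely. A toy analog with plain timestamps:

    #include <stdio.h>

    struct toy_inode { const char *name; unsigned long dirtied_when; };

    static void sync_inodes(struct toy_inode *inodes, int n, unsigned long start)
    {
        for (int i = 0; i < n; i++) {
            if (inodes[i].dirtied_when > start)
                continue;   /* dirtied after sync started: not our problem */
            printf("writing %s\n", inodes[i].name);
        }
    }

    int main(void)
    {
        struct toy_inode inodes[] = {
            { "old", 100 }, { "racing", 205 }, { "older", 50 },
        };
        unsigned long start = 200;  /* pretend jiffies at sync entry */

        sync_inodes(inodes, 3, start);
        return 0;
    }
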
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 15188cc99449..8968f5036fa1 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -918,7 +918,7 @@ xfs_flush_inodes(
918 struct super_block *sb = mp->m_super; 918 struct super_block *sb = mp->m_super;
919 919
920 if (down_read_trylock(&sb->s_umount)) { 920 if (down_read_trylock(&sb->s_umount)) {
921 sync_inodes_sb(sb); 921 sync_inodes_sb(sb, jiffies);
922 up_read(&sb->s_umount); 922 up_read(&sb->s_umount);
923 } 923 }
924} 924}