| author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
| commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
| tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /fs/quota | |
| parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
| parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'fs/quota')
| -rw-r--r-- | fs/quota/Kconfig | 4 |
| -rw-r--r-- | fs/quota/dquot.c | 127 |
| -rw-r--r-- | fs/quota/quota.c | 41 |
| -rw-r--r-- | fs/quota/quota_tree.c | 9 |
| -rw-r--r-- | fs/quota/quota_v2.c | 2 |
5 files changed, 99 insertions, 84 deletions
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 3e21b1e2ad3a..880fd9884366 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -4,6 +4,7 @@
 
 config QUOTA
 	bool "Quota support"
+	select QUOTACTL
 	help
 	  If you say Y here, you will be able to set per user limits for disk
 	  usage (also called disk quotas). Currently, it works for the
@@ -65,8 +66,7 @@ config QFMT_V2
 
 config QUOTACTL
 	bool
-	depends on XFS_QUOTA || QUOTA
-	default y
+	default n
 
 config QUOTACTL_COMPAT
 	bool
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index aad1316a977f..5b572c89e6c4 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -76,7 +76,7 @@
 #include <linux/buffer_head.h>
 #include <linux/capability.h>
 #include <linux/quotaops.h>
-#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
+#include "../internal.h" /* ugh */
 
 #include <asm/uaccess.h>
 
@@ -133,16 +133,20 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
 
 void __quota_error(struct super_block *sb, const char *func,
 		   const char *fmt, ...)
 {
-	va_list args;
-
 	if (printk_ratelimit()) {
+		va_list args;
+		struct va_format vaf;
+
 		va_start(args, fmt);
-		printk(KERN_ERR "Quota error (device %s): %s: ",
-		       sb->s_id, func);
-		vprintk(fmt, args);
-		printk("\n");
+
+		vaf.fmt = fmt;
+		vaf.va = &args;
+
+		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
+		       sb->s_id, func, &vaf);
+
 		va_end(args);
 	}
 }
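
The __quota_error() hunk above replaces the old printk()/vprintk()/printk() sequence with a single printk() call that uses struct va_format and the %pV format specifier, so the whole error line is emitted at once instead of in three pieces that other CPUs' console output could interleave with. Below is a minimal userspace analogue of that "format once, emit once" idea; the function name, buffer size, and output stream are illustrative only and not part of the kernel code.

```c
#include <stdarg.h>
#include <stdio.h>

/* Userspace sketch of the pattern adopted by __quota_error(): build the
 * complete message first, then write it with one call, instead of issuing
 * several partial writes that other threads' output could interleave with. */
static void quota_error_demo(const char *dev, const char *func,
			     const char *fmt, ...)
{
	char msg[256];
	va_list args;

	va_start(args, fmt);
	vsnprintf(msg, sizeof(msg), fmt, args);	/* format the caller's part */
	va_end(args);

	/* one write for the whole line, like printk("... %pV\n", ..., &vaf) */
	fprintf(stderr, "Quota error (device %s): %s: %s\n", dev, func, msg);
}

int main(void)
{
	quota_error_demo("sda1", "demo_func",
			 "Can't read quota data block %u", 42u);
	return 0;
}
```
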
@@ -438,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire);
  */
 int dquot_commit(struct dquot *dquot)
 {
-	int ret = 0, ret2 = 0;
+	int ret = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dqopt->dqio_mutex);
@@ -450,15 +454,10 @@ int dquot_commit(struct dquot *dquot)
 	spin_unlock(&dq_list_lock);
 	/* Inactive dquot can be only if there was error during read/init
 	 * => we have better not writing it */
-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
 		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
-		if (info_dirty(&dqopt->info[dquot->dq_type])) {
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
-					dquot->dq_sb, dquot->dq_type);
-		}
-		if (ret >= 0)
-			ret = ret2;
-	}
+	else
+		ret = -EIO;
 out_sem:
 	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
@@ -692,8 +691,11 @@ static void prune_dqcache(int count)
  * This is called from kswapd when we think we need some
  * more memory
  */
-static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_dqcache_memory(struct shrinker *shrink,
+				 struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+
 	if (nr) {
 		spin_lock(&dq_list_lock);
 		prune_dqcache(nr);
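
The shrink_dqcache_memory() hunk above follows the VFS shrinker API change: instead of receiving the scan count and gfp mask as separate arguments, the callback now gets a struct shrink_control and reads nr_to_scan from it. The standalone sketch below illustrates that calling convention with a simplified, locally defined shrink_control; it is not the kernel's definition (which also carries gfp_mask), and the real callback additionally takes a struct shrinker pointer.

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's struct shrink_control. */
struct shrink_control {
	unsigned long nr_to_scan;
};

static unsigned long cached_objects = 100;

/* New-style callback: the request is bundled in shrink_control, so the
 * signature stays stable if more control parameters are added later. */
static int shrink_demo_cache(struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;

	if (nr) {
		if (nr > cached_objects)
			nr = cached_objects;
		cached_objects -= nr;		/* "prune" nr objects */
	}
	return (int)cached_objects;		/* report how many remain */
}

int main(void)
{
	struct shrink_control sc = { .nr_to_scan = 25 };

	printf("remaining after scan: %d\n", shrink_demo_cache(&sc));
	return 0;
}
```
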
@@ -896,33 +898,38 @@ static void add_dquot_ref(struct super_block *sb, int type)
 	int reserved = 0;
 #endif
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_sb_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
-		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+		spin_lock(&inode->i_lock);
+		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+		    !atomic_read(&inode->i_writecount) ||
+		    !dqinit_needed(inode, type)) {
+			spin_unlock(&inode->i_lock);
 			continue;
+		}
 #ifdef CONFIG_QUOTA_DEBUG
 		if (unlikely(inode_get_rsv_space(inode) > 0))
 			reserved = 1;
 #endif
-		if (!atomic_read(&inode->i_writecount))
-			continue;
-		if (!dqinit_needed(inode, type))
-			continue;
-
 		__iget(inode);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode->i_lock);
+		spin_unlock(&inode_sb_list_lock);
 
 		iput(old_inode);
 		__dquot_initialize(inode, type);
-		/* We hold a reference to 'inode' so it couldn't have been
-		 * removed from s_inodes list while we dropped the inode_lock.
-		 * We cannot iput the inode now as we can be holding the last
-		 * reference and we cannot iput it under inode_lock. So we
-		 * keep the reference and iput it later. */
+
+		/*
+		 * We hold a reference to 'inode' so it couldn't have been
+		 * removed from s_inodes list while we dropped the
+		 * inode_sb_list_lock We cannot iput the inode now as we can be
+		 * holding the last reference and we cannot iput it under
+		 * inode_sb_list_lock. So we keep the reference and iput it
+		 * later.
+		 */
 		old_inode = inode;
-		spin_lock(&inode_lock);
+		spin_lock(&inode_sb_list_lock);
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_sb_list_lock);
 	iput(old_inode);
 
 #ifdef CONFIG_QUOTA_DEBUG
@@ -947,7 +954,7 @@ static inline int dqput_blocks(struct dquot *dquot)
 
 /*
  * Remove references to dquots from inode and add dquot to list for freeing
- * if we have the last referece to dquot
+ * if we have the last reference to dquot
  * We can't race with anybody because we hold dqptr_sem for writing...
  */
 static int remove_inode_dquot_ref(struct inode *inode, int type,
@@ -1003,7 +1010,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 	struct inode *inode;
 	int reserved = 0;
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_sb_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		/*
 		 * We have to scan also I_NEW inodes because they can already
@@ -1017,7 +1024,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 			remove_inode_dquot_ref(inode, type, tofree_head);
 		}
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_sb_list_lock);
 #ifdef CONFIG_QUOTA_DEBUG
 	if (reserved) {
 		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
@@ -1386,6 +1393,9 @@ static void __dquot_initialize(struct inode *inode, int type)
 		/* Avoid races with quotaoff() */
 		if (!sb_has_quota_active(sb, cnt))
 			continue;
+		/* We could race with quotaon or dqget() could have failed */
+		if (!got[cnt])
+			continue;
 		if (!inode->i_dquot[cnt]) {
 			inode->i_dquot[cnt] = got[cnt];
 			got[cnt] = NULL;
@@ -1736,6 +1746,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	qsize_t rsv_space = 0;
 	struct dquot *transfer_from[MAXQUOTAS] = {};
 	int cnt, ret = 0;
+	char is_valid[MAXQUOTAS] = {};
 	char warntype_to[MAXQUOTAS];
 	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
 
@@ -1757,8 +1768,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	space = cur_space + rsv_space;
 	/* Build the transfer_from list and check the limits */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		/*
+		 * Skip changes for same uid or gid or for turned off quota-type.
+		 */
 		if (!transfer_to[cnt])
 			continue;
+		/* Avoid races with quotaoff() */
+		if (!sb_has_quota_active(inode->i_sb, cnt))
+			continue;
+		is_valid[cnt] = 1;
 		transfer_from[cnt] = inode->i_dquot[cnt];
 		ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
 		if (ret)
@@ -1772,12 +1790,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	 * Finally perform the needed transfer from transfer_from to transfer_to
 	 */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		/*
-		 * Skip changes for same uid or gid or for turned off quota-type.
-		 */
-		if (!transfer_to[cnt])
+		if (!is_valid[cnt])
 			continue;
-
 		/* Due to IO error we might not have transfer_from[] structure */
 		if (transfer_from[cnt]) {
 			warntype_from_inodes[cnt] =
@@ -1801,18 +1815,19 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 
 	mark_all_dquot_dirty(transfer_from);
 	mark_all_dquot_dirty(transfer_to);
-	/* Pass back references to put */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		transfer_to[cnt] = transfer_from[cnt];
-warn:
 	flush_warnings(transfer_to, warntype_to);
 	flush_warnings(transfer_from, warntype_from_inodes);
 	flush_warnings(transfer_from, warntype_from_space);
-	return ret;
+	/* Pass back references to put */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if (is_valid[cnt])
+			transfer_to[cnt] = transfer_from[cnt];
+	return 0;
 over_quota:
 	spin_unlock(&dq_data_lock);
 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-	goto warn;
+	flush_warnings(transfer_to, warntype_to);
+	return ret;
 }
 EXPORT_SYMBOL(__dquot_transfer);
 
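
The __dquot_transfer() hunks introduce an is_valid[] array: the first loop records which quota types were actually validated (a transfer target exists and quota is still active), and the later transfer and reference hand-back loops consult only that record. The self-contained sketch below distills that two-pass pattern; the arrays and values are invented purely for illustration.

```c
#include <stdio.h>

#define NSLOTS 3

/* Pass 1 marks which slots are usable; pass 2 and the hand-back loop consult
 * the same marks instead of re-deriving the condition, so the passes can
 * never disagree -- the consistency the is_valid[] array provides. */
int main(void)
{
	int transfer_to[NSLOTS] = { 10, 0, 30 };	/* 0 = nothing to transfer */
	int quota_active[NSLOTS] = { 1, 1, 0 };		/* slot 2 was turned off */
	char is_valid[NSLOTS] = { 0 };
	int moved[NSLOTS] = { 0 };

	/* pass 1: validate and remember the decision */
	for (int i = 0; i < NSLOTS; i++) {
		if (!transfer_to[i])
			continue;
		if (!quota_active[i])
			continue;
		is_valid[i] = 1;
	}

	/* pass 2: act only on slots validated above */
	for (int i = 0; i < NSLOTS; i++) {
		if (!is_valid[i])
			continue;
		moved[i] = transfer_to[i];
	}

	for (int i = 0; i < NSLOTS; i++)
		printf("slot %d: valid=%d moved=%d\n", i, is_valid[i], moved[i]);
	return 0;
}
```
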
@@ -2177,8 +2192,8 @@ int dquot_resume(struct super_block *sb, int type)
 }
 EXPORT_SYMBOL(dquot_resume);
 
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
-			struct path *path)
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
+		   struct path *path)
 {
 	int error = security_quota_on(path->dentry);
 	if (error)
@@ -2192,20 +2207,6 @@ int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
 					DQUOT_LIMITS_ENABLED);
 	return error;
 }
-EXPORT_SYMBOL(dquot_quota_on_path);
-
-int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name)
-{
-	struct path path;
-	int error;
-
-	error = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (!error) {
-		error = dquot_quota_on_path(sb, type, format_id, &path);
-		path_put(&path);
-	}
-	return error;
-}
 EXPORT_SYMBOL(dquot_quota_on);
 
 /*
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b299961e1edb..b34bdb25490c 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -64,18 +64,15 @@ static int quota_sync_all(int type)
 }
 
 static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
-			 void __user *addr)
+			 struct path *path)
 {
-	char *pathname;
-	int ret = -ENOSYS;
-
-	pathname = getname(addr);
-	if (IS_ERR(pathname))
-		return PTR_ERR(pathname);
-	if (sb->s_qcop->quota_on)
-		ret = sb->s_qcop->quota_on(sb, type, id, pathname);
-	putname(pathname);
-	return ret;
+	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+		return -ENOSYS;
+	if (sb->s_qcop->quota_on_meta)
+		return sb->s_qcop->quota_on_meta(sb, type, id);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+	return sb->s_qcop->quota_on(sb, type, id, path);
 }
 
 static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
@@ -241,7 +238,7 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
 
 /* Copy parameters and call proper function */
 static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
-		       void __user *addr)
+		       void __user *addr, struct path *path)
 {
 	int ret;
 
@@ -256,7 +253,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 
 	switch (cmd) {
 	case Q_QUOTAON:
-		return quota_quotaon(sb, type, cmd, id, addr);
+		return quota_quotaon(sb, type, cmd, id, path);
 	case Q_QUOTAOFF:
 		if (!sb->s_qcop->quota_off)
 			return -ENOSYS;
@@ -335,6 +332,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
 {
 	uint cmds, type;
 	struct super_block *sb = NULL;
+	struct path path, *pathp = NULL;
 	int ret;
 
 	cmds = cmd >> SUBCMDSHIFT;
@@ -351,12 +349,27 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
 		return -ENODEV;
 	}
 
+	/*
+	 * Path for quotaon has to be resolved before grabbing superblock
+	 * because that gets s_umount sem which is also possibly needed by path
+	 * resolution (think about autofs) and thus deadlocks could arise.
+	 */
+	if (cmds == Q_QUOTAON) {
+		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+		if (ret)
+			pathp = ERR_PTR(ret);
+		else
+			pathp = &path;
+	}
+
 	sb = quotactl_block(special);
 	if (IS_ERR(sb))
 		return PTR_ERR(sb);
 
-	ret = do_quotactl(sb, type, cmds, id, addr);
+	ret = do_quotactl(sb, type, cmds, id, addr, pathp);
 
 	drop_super(sb);
+	if (pathp && !IS_ERR(pathp))
+		path_put(pathp);
 	return ret;
 }
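
The quotactl() hunks above resolve the Q_QUOTAON target with user_path_at() before quotactl_block() grabs s_umount, and pass the resulting struct path down through do_quotactl() to ->quota_on(). The userspace call is unchanged; a minimal sketch of turning user quotas on for a vfsv0-format quota file is shown below. The device node and quota-file path are placeholders, and QFMT_VFS_V0 is defined locally in case the libc headers do not expose it.

```c
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/quota.h>		/* quotactl(), QCMD, Q_QUOTAON, USRQUOTA */

#ifndef QFMT_VFS_V0
#define QFMT_VFS_V0 2		/* vfsv0 quota format id from <linux/quota.h> */
#endif

int main(void)
{
	/* Placeholder paths: the block device backing the filesystem and the
	 * quota file living on that filesystem. */
	const char *special = "/dev/sda1";
	const char *quota_file = "/mnt/data/aquota.user";

	/* For Q_QUOTAON the addr argument is the quota file path, which the
	 * kernel now resolves before taking the superblock lock. */
	if (quotactl(QCMD(Q_QUOTAON, USRQUOTA), special, QFMT_VFS_V0,
		     (caddr_t)quota_file) != 0) {
		fprintf(stderr, "quotactl(Q_QUOTAON): %s\n", strerror(errno));
		return 1;
	}
	printf("user quotas enabled on %s\n", special);
	return 0;
}
```
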
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 9e48874eabcc..e41c1becf096 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -468,8 +468,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		return -ENOMEM;
 	ret = read_blk(info, *blk, buf);
 	if (ret < 0) {
-		quota_error(dquot->dq_sb, "Can't read quota data "
-			    "block %u", blk);
+		quota_error(dquot->dq_sb, "Can't read quota data block %u",
+			    *blk);
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -493,8 +493,9 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 	} else {
 		ret = write_blk(info, *blk, buf);
 		if (ret < 0)
-			quota_error(dquot->dq_sb, "Can't write quota "
-				    "tree block %u", blk);
+			quota_error(dquot->dq_sb,
+				    "Can't write quota tree block %u",
+				    *blk);
 	}
 }
 out_buf:
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 65444d29406b..f1ab3604db5a 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -112,7 +112,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
 	if (!info->dqi_priv) {
 		printk(KERN_WARNING
 		       "Not enough memory for quota information structure.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	qinfo = info->dqi_priv;
 	if (version == 0) {