-rw-r--r--  Documentation/filesystems/xfs.txt |  3
-rw-r--r--  fs/xfs/xfs_acl.c                  | 31
-rw-r--r--  fs/xfs/xfs_acl.h                  | 31
-rw-r--r--  fs/xfs/xfs_attr_leaf.c            |  2
-rw-r--r--  fs/xfs/xfs_dquot.c                | 37
-rw-r--r--  fs/xfs/xfs_inode.c                | 16
-rw-r--r--  fs/xfs/xfs_log_recover.c          | 84
-rw-r--r--  fs/xfs/xfs_qm.c                   | 40
-rw-r--r--  fs/xfs/xfs_quota.h                |  2
-rw-r--r--  fs/xfs/xfs_super.c                | 11
10 files changed, 198 insertions, 59 deletions
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 3e4b3dd1e046..83577f0232a0 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -33,6 +33,9 @@ When mounting an XFS filesystem, the following options are accepted.
 	removing extended attributes) the on-disk superblock feature
 	bit field will be updated to reflect this format being in use.
 
+	CRC enabled filesystems always use the attr2 format, and so
+	will reject the noattr2 mount option if it is set.
+
   barrier
 	Enables the use of block layer write barriers for writes into
 	the journal and unwritten extent conversion. This allows for
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 1d32f1d52763..306d883d89bc 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -21,6 +21,8 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_vnodeops.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
 #include "xfs_trace.h"
 #include <linux/slab.h>
 #include <linux/xattr.h>
@@ -34,7 +36,9 @@
  */
 
 STATIC struct posix_acl *
-xfs_acl_from_disk(struct xfs_acl *aclp)
+xfs_acl_from_disk(
+	struct xfs_acl	*aclp,
+	int		max_entries)
 {
 	struct posix_acl_entry *acl_e;
 	struct posix_acl *acl;
@@ -42,7 +46,7 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
 	unsigned int count, i;
 
 	count = be32_to_cpu(aclp->acl_cnt);
-	if (count > XFS_ACL_MAX_ENTRIES)
+	if (count > max_entries)
 		return ERR_PTR(-EFSCORRUPTED);
 
 	acl = posix_acl_alloc(count, GFP_KERNEL);
@@ -108,9 +112,9 @@ xfs_get_acl(struct inode *inode, int type)
 	struct xfs_inode *ip = XFS_I(inode);
 	struct posix_acl *acl;
 	struct xfs_acl *xfs_acl;
-	int len = sizeof(struct xfs_acl);
 	unsigned char *ea_name;
 	int error;
+	int len;
 
 	acl = get_cached_acl(inode, type);
 	if (acl != ACL_NOT_CACHED)
@@ -133,8 +137,8 @@ xfs_get_acl(struct inode *inode, int type)
 	 * If we have a cached ACLs value just return it, not need to
 	 * go out to the disk.
 	 */
-
-	xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+	len = XFS_ACL_MAX_SIZE(ip->i_mount);
+	xfs_acl = kzalloc(len, GFP_KERNEL);
 	if (!xfs_acl)
 		return ERR_PTR(-ENOMEM);
 
@@ -153,7 +157,7 @@ xfs_get_acl(struct inode *inode, int type)
 		goto out;
 	}
 
-	acl = xfs_acl_from_disk(xfs_acl);
+	acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount));
 	if (IS_ERR(acl))
 		goto out;
 
@@ -189,16 +193,17 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
 
 	if (acl) {
 		struct xfs_acl *xfs_acl;
-		int len;
+		int len = XFS_ACL_MAX_SIZE(ip->i_mount);
 
-		xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+		xfs_acl = kzalloc(len, GFP_KERNEL);
 		if (!xfs_acl)
 			return -ENOMEM;
 
 		xfs_acl_to_disk(xfs_acl, acl);
-		len = sizeof(struct xfs_acl) -
-		    (sizeof(struct xfs_acl_entry) *
-		    (XFS_ACL_MAX_ENTRIES - acl->a_count));
+
+		/* subtract away the unused acl entries */
+		len -= sizeof(struct xfs_acl_entry) *
+			(XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
 
 		error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
 				len, ATTR_ROOT);
@@ -243,7 +248,7 @@ xfs_set_mode(struct inode *inode, umode_t mode)
 static int
 xfs_acl_exists(struct inode *inode, unsigned char *name)
 {
-	int len = sizeof(struct xfs_acl);
+	int len = XFS_ACL_MAX_SIZE(XFS_M(inode->i_sb));
 
 	return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
 			    ATTR_ROOT|ATTR_KERNOVAL) == 0);
@@ -379,7 +384,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
 		goto out_release;
 
 	error = -EINVAL;
-	if (acl->a_count > XFS_ACL_MAX_ENTRIES)
+	if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
 		goto out_release;
 
 	if (type == ACL_TYPE_ACCESS) {
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 39632d941354..4016a567b83c 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -22,19 +22,36 @@ struct inode;
 struct posix_acl;
 struct xfs_inode;
 
-#define XFS_ACL_MAX_ENTRIES 25
 #define XFS_ACL_NOT_PRESENT (-1)
 
 /* On-disk XFS access control list structure */
+struct xfs_acl_entry {
+	__be32	ae_tag;
+	__be32	ae_id;
+	__be16	ae_perm;
+	__be16	ae_pad;		/* fill the implicit hole in the structure */
+};
+
 struct xfs_acl {
 	__be32		acl_cnt;
-	struct xfs_acl_entry {
-		__be32	ae_tag;
-		__be32	ae_id;
-		__be16	ae_perm;
-	} acl_entry[XFS_ACL_MAX_ENTRIES];
+	struct xfs_acl_entry acl_entry[0];
 };
 
+/*
+ * The number of ACL entries allowed is defined by the on-disk format.
+ * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
+ * limited only by the maximum size of the xattr that stores the information.
+ */
+#define XFS_ACL_MAX_ENTRIES(mp)	\
+	(xfs_sb_version_hascrc(&mp->m_sb) \
+		? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
+					sizeof(struct xfs_acl_entry) \
+		: 25)
+
+#define XFS_ACL_MAX_SIZE(mp) \
+	(sizeof(struct xfs_acl) + \
+		sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
+
 /* On-disk XFS extended attribute names */
 #define SGI_ACL_FILE		(unsigned char *)"SGI_ACL_FILE"
 #define SGI_ACL_DEFAULT		(unsigned char *)"SGI_ACL_DEFAULT"
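
For a sense of scale, the new v5 limit can be worked out directly from the XFS_ACL_MAX_ENTRIES() macro above. The stand-alone sketch below is illustrative arithmetic only, not kernel code: it assumes XATTR_SIZE_MAX is 64 KiB and that the on-disk structures pack to a 4-byte header plus 12-byte entries.

#include <stdio.h>

/*
 * Illustrative only: mirrors the arithmetic of XFS_ACL_MAX_ENTRIES() above.
 * The xattr limit and packed structure sizes are assumed values.
 */
#define DEMO_XATTR_SIZE_MAX	65536	/* assumed VFS xattr size limit */
#define DEMO_ACL_HDR_SIZE	4	/* sizeof(struct xfs_acl): acl_cnt only */
#define DEMO_ACL_ENTRY_SIZE	12	/* ae_tag + ae_id + ae_perm + ae_pad */

int main(void)
{
	int v4_max = 25;	/* fixed by the v4 on-disk format */
	int v5_max = (DEMO_XATTR_SIZE_MAX - DEMO_ACL_HDR_SIZE) /
			DEMO_ACL_ENTRY_SIZE;

	printf("v4 superblock: at most %d ACL entries\n", v4_max);
	printf("v5 superblock: at most %d ACL entries\n", v5_max);
	return 0;
}

With those assumptions this prints 25 and 5461, which is why the v5 limit is effectively bounded by the xattr size rather than by a fixed entry count.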
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index d788302e506a..31d3cd129269 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -3258,7 +3258,7 @@ xfs_attr3_leaf_inactive(
 			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
 			if (name_rmt->valueblk) {
 				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
-				lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
+				lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
 						be32_to_cpu(name_rmt->valuelen));
 				lp++;
 			}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index a41f8bf1da37..044e97a33c8d 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -249,8 +249,11 @@ xfs_qm_init_dquot_blk(
 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
 		d->dd_diskdq.d_id = cpu_to_be32(curid);
 		d->dd_diskdq.d_flags = type;
-		if (xfs_sb_version_hascrc(&mp->m_sb))
+		if (xfs_sb_version_hascrc(&mp->m_sb)) {
 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+					 XFS_DQUOT_CRC_OFF);
+		}
 	}
 
 	xfs_trans_dquot_buf(tp, bp,
@@ -286,23 +289,6 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 }
 
-STATIC void
-xfs_dquot_buf_calc_crc(
-	struct xfs_mount	*mp,
-	struct xfs_buf		*bp)
-{
-	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
-	int			i;
-
-	if (!xfs_sb_version_hascrc(&mp->m_sb))
-		return;
-
-	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++, d++) {
-		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
-				 offsetof(struct xfs_dqblk, dd_crc));
-	}
-}
-
 STATIC bool
 xfs_dquot_buf_verify_crc(
 	struct xfs_mount	*mp,
@@ -328,12 +314,11 @@ xfs_dquot_buf_verify_crc(
 
 	for (i = 0; i < ndquots; i++, d++) {
 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-				      offsetof(struct xfs_dqblk, dd_crc)))
+				      XFS_DQUOT_CRC_OFF))
 			return false;
 		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
 			return false;
 	}
-
 	return true;
 }
 
@@ -393,6 +378,11 @@ xfs_dquot_buf_read_verify(
 	}
 }
 
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
 void
 xfs_dquot_buf_write_verify(
 	struct xfs_buf	*bp)
@@ -404,7 +394,6 @@ xfs_dquot_buf_write_verify(
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
 		return;
 	}
-	xfs_dquot_buf_calc_crc(mp, bp);
 }
 
 const struct xfs_buf_ops xfs_dquot_buf_ops = {
@@ -1151,11 +1140,17 @@ xfs_qm_dqflush(
 	 * copy the lsn into the on-disk dquot now while we have the in memory
 	 * dquot here. This can't be done later in the write verifier as we
 	 * can't get access to the log item at that point in time.
+	 *
+	 * We also calculate the CRC here so that the on-disk dquot in the
+	 * buffer always has a valid CRC. This ensures there is no possibility
+	 * of a dquot without an up-to-date CRC getting to disk.
 	 */
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
 
 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
+		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
+				 XFS_DQUOT_CRC_OFF);
 	}
 
 	/*
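
The hunks above move the dquot CRC calculation from the write verifier to flush time, but the operation itself is the same either way: checksum the on-disk dquot block, excluding the CRC field itself, and store the result at XFS_DQUOT_CRC_OFF. A rough user-space sketch of that pattern follows; the struct layout, the bitwise CRC-32C and the helper names are simplified stand-ins, not the kernel's xfs_update_cksum()/xfs_verify_cksum() implementations.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for struct xfs_dqblk: a CRC field embedded somewhere in the block */
struct demo_dqblk {
	uint8_t		body[104];	/* stand-in for the disk dquot */
	uint32_t	crc;		/* stand-in for dd_crc */
	uint8_t		tail[24];	/* stand-in for dd_lsn, dd_uuid, ... */
};
#define DEMO_DQUOT_CRC_OFF	offsetof(struct demo_dqblk, crc)

/* bitwise CRC-32C (Castagnoli); a table-driven version is used in practice */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return crc;
}

/* checksum the block with the CRC field counted as zero, then store it */
static void demo_update_cksum(char *buf, size_t len, size_t crc_off)
{
	uint32_t crc;

	memset(buf + crc_off, 0, sizeof(crc));
	crc = ~crc32c(~0u, buf, len);
	memcpy(buf + crc_off, &crc, sizeof(crc));	/* host-endian in this demo */
}

static int demo_verify_cksum(char *buf, size_t len, size_t crc_off)
{
	uint32_t want, got;

	memcpy(&want, buf + crc_off, sizeof(want));
	memset(buf + crc_off, 0, sizeof(got));
	got = ~crc32c(~0u, buf, len);
	memcpy(buf + crc_off, &want, sizeof(want));	/* restore the stored CRC */
	return want == got;
}

int main(void)
{
	struct demo_dqblk d;

	memset(&d, 0xab, sizeof(d));
	demo_update_cksum((char *)&d, sizeof(d), DEMO_DQUOT_CRC_OFF);
	printf("clean block verifies: %d\n",
	       demo_verify_cksum((char *)&d, sizeof(d), DEMO_DQUOT_CRC_OFF));

	d.body[0] ^= 1;		/* simulate on-disk corruption */
	printf("corrupt block verifies: %d\n",
	       demo_verify_cksum((char *)&d, sizeof(d), DEMO_DQUOT_CRC_OFF));
	return 0;
}

The point of the patch is only about when this runs: with the CRC computed at flush and recovery time, the dquot sitting in the buffer always carries a valid checksum, so the write verifier no longer has to touch it.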
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index efbe1accb6ca..7f7be5f98f52 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1638,6 +1638,10 @@ xfs_iunlink(
 	dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
 	offset = ip->i_imap.im_boffset +
 		offsetof(xfs_dinode_t, di_next_unlinked);
+
+	/* need to recalc the inode CRC if appropriate */
+	xfs_dinode_calc_crc(mp, dip);
+
 	xfs_trans_inode_buf(tp, ibp);
 	xfs_trans_log_buf(tp, ibp, offset,
 			  (offset + sizeof(xfs_agino_t) - 1));
@@ -1723,6 +1727,10 @@ xfs_iunlink_remove(
 		dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
 		offset = ip->i_imap.im_boffset +
 			offsetof(xfs_dinode_t, di_next_unlinked);
+
+		/* need to recalc the inode CRC if appropriate */
+		xfs_dinode_calc_crc(mp, dip);
+
 		xfs_trans_inode_buf(tp, ibp);
 		xfs_trans_log_buf(tp, ibp, offset,
 				  (offset + sizeof(xfs_agino_t) - 1));
@@ -1796,6 +1804,10 @@ xfs_iunlink_remove(
 		dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
 		offset = ip->i_imap.im_boffset +
 			offsetof(xfs_dinode_t, di_next_unlinked);
+
+		/* need to recalc the inode CRC if appropriate */
+		xfs_dinode_calc_crc(mp, dip);
+
 		xfs_trans_inode_buf(tp, ibp);
 		xfs_trans_log_buf(tp, ibp, offset,
 				  (offset + sizeof(xfs_agino_t) - 1));
@@ -1809,6 +1821,10 @@ xfs_iunlink_remove(
 		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
 		ASSERT(next_agino != 0);
 		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
+
+		/* need to recalc the inode CRC if appropriate */
+		xfs_dinode_calc_crc(mp, last_dip);
+
 		xfs_trans_inode_buf(tp, last_ibp);
 		xfs_trans_log_buf(tp, last_ibp, offset,
 				  (offset + sizeof(xfs_agino_t) - 1));
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index d9e4d3c3991a..45a85ff84da1 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1599,10 +1599,43 @@ xlog_recover_add_to_trans(
 }
 
 /*
- * Sort the log items in the transaction. Cancelled buffers need
- * to be put first so they are processed before any items that might
- * modify the buffers. If they are cancelled, then the modifications
- * don't need to be replayed.
+ * Sort the log items in the transaction.
+ *
+ * The ordering constraints are defined by the inode allocation and unlink
+ * behaviour. The rules are:
+ *
+ * 1. Every item is only logged once in a given transaction. Hence it
+ *    represents the last logged state of the item. Hence ordering is
+ *    dependent on the order in which operations need to be performed so
+ *    required initial conditions are always met.
+ *
+ * 2. Cancelled buffers are recorded in pass 1 in a separate table and
+ *    there's nothing to replay from them so we can simply cull them
+ *    from the transaction. However, we can't do that until after we've
+ *    replayed all the other items because they may be dependent on the
+ *    cancelled buffer and replaying the cancelled buffer can remove it
+ *    from the cancelled buffer table. Hence they have to be done last.
+ *
+ * 3. Inode allocation buffers must be replayed before inode items that
+ *    read the buffer and replay changes into it.
+ *
+ * 4. Inode unlink buffers must be replayed after inode items are replayed.
+ *    This ensures that inodes are completely flushed to the inode buffer
+ *    in a "free" state before we remove the unlinked inode list pointer.
+ *
+ * Hence the ordering needs to be inode allocation buffers first, inode items
+ * second, inode unlink buffers third and cancelled buffers last.
+ *
+ * But there's a problem with that - we can't tell an inode allocation buffer
+ * apart from a regular buffer, so we can't separate them. We can, however,
+ * tell an inode unlink buffer from the others, and so we can separate them out
+ * from all the other buffers and move them to last.
+ *
+ * Hence, 4 lists, in order from head to tail:
+ *	- buffer_list for all buffers except cancelled/inode unlink buffers
+ *	- item_list for all non-buffer items
+ *	- inode_buffer_list for inode unlink buffers
+ *	- cancel_list for the cancelled buffers
  */
 STATIC int
 xlog_recover_reorder_trans(
@@ -1612,6 +1645,10 @@ xlog_recover_reorder_trans(
 {
 	xlog_recover_item_t	*item, *n;
 	LIST_HEAD(sort_list);
+	LIST_HEAD(cancel_list);
+	LIST_HEAD(buffer_list);
+	LIST_HEAD(inode_buffer_list);
+	LIST_HEAD(inode_list);
 
 	list_splice_init(&trans->r_itemq, &sort_list);
 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
@@ -1619,12 +1656,18 @@ xlog_recover_reorder_trans(
 
 		switch (ITEM_TYPE(item)) {
 		case XFS_LI_BUF:
-			if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
+			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
 				trace_xfs_log_recover_item_reorder_head(log,
 							trans, item, pass);
-				list_move(&item->ri_list, &trans->r_itemq);
+				list_move(&item->ri_list, &cancel_list);
+				break;
+			}
+			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+				list_move(&item->ri_list, &inode_buffer_list);
 				break;
 			}
+			list_move_tail(&item->ri_list, &buffer_list);
+			break;
 		case XFS_LI_INODE:
 		case XFS_LI_DQUOT:
 		case XFS_LI_QUOTAOFF:
@@ -1632,7 +1675,7 @@ xlog_recover_reorder_trans(
 		case XFS_LI_EFI:
 			trace_xfs_log_recover_item_reorder_tail(log,
 							trans, item, pass);
-			list_move_tail(&item->ri_list, &trans->r_itemq);
+			list_move_tail(&item->ri_list, &inode_list);
 			break;
 		default:
 			xfs_warn(log->l_mp,
@@ -1643,6 +1686,14 @@ xlog_recover_reorder_trans(
 		}
 	}
 	ASSERT(list_empty(&sort_list));
+	if (!list_empty(&buffer_list))
+		list_splice(&buffer_list, &trans->r_itemq);
+	if (!list_empty(&inode_list))
+		list_splice_tail(&inode_list, &trans->r_itemq);
+	if (!list_empty(&inode_buffer_list))
+		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
+	if (!list_empty(&cancel_list))
+		list_splice_tail(&cancel_list, &trans->r_itemq);
 	return 0;
 }
 
@@ -1861,6 +1912,15 @@ xlog_recover_do_inode_buffer(
 		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
 					      next_unlinked_offset);
 		*buffer_nextp = *logged_nextp;
+
+		/*
+		 * If necessary, recalculate the CRC in the on-disk inode. We
+		 * have to leave the inode in a consistent state for whoever
+		 * reads it next....
+		 */
+		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
+				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
+
 	}
 
 	return 0;
@@ -2266,6 +2326,12 @@ xfs_qm_dqcheck(
 	d->dd_diskdq.d_flags = type;
 	d->dd_diskdq.d_id = cpu_to_be32(id);
 
+	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+				 XFS_DQUOT_CRC_OFF);
+	}
+
 	return errs;
 }
 
@@ -2793,6 +2859,10 @@ xlog_recover_dquot_pass2(
 	}
 
 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
+	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
+				 XFS_DQUOT_CRC_OFF);
+	}
 
 	ASSERT(dq_f->qlf_size == 2);
 	ASSERT(bp->b_target->bt_mount == mp);
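
The reordering rules spelled out in the xlog_recover_reorder_trans() comment above boil down to bucketing the items by type and then splicing the buckets back together in a fixed order. The toy stand-alone program below shows only that resulting order; the item classes and arrays are made up for the demo and stand in for xlog_recover_item_t and the kernel's list_splice() calls.

#include <stdio.h>

/* toy item classes standing in for the four recovery lists */
enum demo_class {
	DEMO_BUFFER,		/* ordinary buffers: replayed first */
	DEMO_ITEM,		/* inode/dquot/EFI items: second */
	DEMO_INODE_UNLINK_BUF,	/* inode unlink buffers: third */
	DEMO_CANCELLED_BUF,	/* cancelled buffers: handled last */
	DEMO_NCLASS
};

static const char *demo_name[DEMO_NCLASS] = {
	"buffer", "item", "inode unlink buffer", "cancelled buffer"
};

int main(void)
{
	/* items in the order they were found in the log transaction */
	enum demo_class trans[] = {
		DEMO_ITEM, DEMO_CANCELLED_BUF, DEMO_BUFFER,
		DEMO_INODE_UNLINK_BUF, DEMO_ITEM, DEMO_BUFFER,
	};
	int n = sizeof(trans) / sizeof(trans[0]);

	/*
	 * Emitting each class in turn gives the overall order the comment
	 * asks for: buffers, then non-buffer items, then inode unlink
	 * buffers, then cancelled buffers.
	 */
	printf("replay order:\n");
	for (int c = 0; c < DEMO_NCLASS; c++)
		for (int i = 0; i < n; i++)
			if (trans[i] == (enum demo_class)c)
				printf("  log item %d: %s\n", i,
				       demo_name[trans[i]]);
	return 0;
}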
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index f41702b43003..b75c9bb6e71e 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -41,6 +41,7 @@
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_cksum.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -839,7 +840,7 @@ xfs_qm_reset_dqcounts(
 	xfs_dqid_t	id,
 	uint		type)
 {
-	xfs_disk_dquot_t	*ddq;
+	struct xfs_dqblk	*dqb;
 	int			j;
 
 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
@@ -853,8 +854,12 @@ xfs_qm_reset_dqcounts(
 	do_div(j, sizeof(xfs_dqblk_t));
 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 #endif
-	ddq = bp->b_addr;
+	dqb = bp->b_addr;
 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
+		struct xfs_disk_dquot	*ddq;
+
+		ddq = (struct xfs_disk_dquot *)&dqb[j];
+
 		/*
 		 * Do a sanity check, and if needed, repair the dqblk. Don't
 		 * output any warnings because it's perfectly possible to
@@ -871,7 +876,12 @@ xfs_qm_reset_dqcounts(
 		ddq->d_bwarns = 0;
 		ddq->d_iwarns = 0;
 		ddq->d_rtbwarns = 0;
-		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
+
+		if (xfs_sb_version_hascrc(&mp->m_sb)) {
+			xfs_update_cksum((char *)&dqb[j],
+					 sizeof(struct xfs_dqblk),
+					 XFS_DQUOT_CRC_OFF);
+		}
 	}
 }
 
@@ -907,19 +917,29 @@ xfs_qm_dqiter_bufs(
 			      XFS_FSB_TO_DADDR(mp, bno),
 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 			      &xfs_dquot_buf_ops);
-		if (error)
-			break;
 
 		/*
-		 * XXX(hch): need to figure out if it makes sense to validate
-		 * the CRC here.
+		 * CRC and validation errors will return an EFSCORRUPTED here.
+		 * If this occurs, re-read without CRC validation so that we
+		 * can repair the damage via xfs_qm_reset_dqcounts(). This
+		 * process will leave a trace in the log indicating corruption
+		 * has been detected.
 		 */
+		if (error == EFSCORRUPTED) {
+			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+					XFS_FSB_TO_DADDR(mp, bno),
+					mp->m_quotainfo->qi_dqchunklen, 0, &bp,
+					NULL);
+		}
+
+		if (error)
+			break;
+
 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
 		xfs_buf_delwri_queue(bp, buffer_list);
 		xfs_buf_relse(bp);
-		/*
-		 * goto the next block.
-		 */
+
+		/* goto the next block. */
 		bno++;
 		firstid += mp->m_quotainfo->qi_dqperchunk;
 	}
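
The rewritten quotacheck loop above reads each dquot buffer through the verifier and, only when that fails with EFSCORRUPTED, re-reads the same range with no verifier attached so xfs_qm_reset_dqcounts() can reinitialise the contents. The shape of that fallback is sketched below with an in-memory "disk"; the errno value, magic number and helper names are made up for the demo, and only the read/verify/retry/repair pattern is taken from the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_EFSCORRUPTED	117		/* made-up errno for the demo */
#define DEMO_GOOD_MAGIC		0x4451u		/* made-up block magic */

struct demo_block {
	uint32_t	magic;
	uint32_t	counters[4];
};

typedef int (*demo_verifier_t)(const struct demo_block *);

/* pretend on-disk contents: wrong magic, stale counters */
static struct demo_block demo_disk = {
	.magic = 0xdeadu, .counters = { 1, 2, 3, 4 },
};

static int demo_verify(const struct demo_block *b)
{
	return b->magic == DEMO_GOOD_MAGIC ? 0 : DEMO_EFSCORRUPTED;
}

/* "read" the block, optionally running a verifier over the result */
static int demo_read_block(struct demo_block *out, demo_verifier_t verify)
{
	memcpy(out, &demo_disk, sizeof(*out));
	return verify ? verify(out) : 0;
}

int main(void)
{
	struct demo_block bp;
	int error;

	error = demo_read_block(&bp, demo_verify);
	if (error == DEMO_EFSCORRUPTED) {
		/*
		 * Corruption detected: re-read without verification so the
		 * block can be repaired instead of abandoning the scan.
		 */
		error = demo_read_block(&bp, NULL);
	}
	if (error)
		return 1;

	/* repair step, analogous to xfs_qm_reset_dqcounts() in the patch */
	bp.magic = DEMO_GOOD_MAGIC;
	memset(bp.counters, 0, sizeof(bp.counters));
	memcpy(&demo_disk, &bp, sizeof(demo_disk));

	printf("block repaired, magic is now %#x\n", demo_disk.magic);
	return 0;
}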
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index c61e31c7d997..c38068f26c55 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -87,6 +87,8 @@ typedef struct xfs_dqblk {
 	uuid_t		  dd_uuid;	/* location information */
 } xfs_dqblk_t;
 
+#define XFS_DQUOT_CRC_OFF	offsetof(struct xfs_dqblk, dd_crc)
+
 /*
  * flags for q_flags field in the dquot.
  */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ea341cea68cb..3033ba5e9762 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1373,6 +1373,17 @@ xfs_finish_flags(
 	}
 
 	/*
+	 * V5 filesystems always use attr2 format for attributes.
+	 */
+	if (xfs_sb_version_hascrc(&mp->m_sb) &&
+	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
+		xfs_warn(mp,
+"Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.",
+			MNTOPT_NOATTR2, MNTOPT_ATTR2);
+		return XFS_ERROR(EINVAL);
+	}
+
+	/*
 	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
 	 * told by noattr2 to turn it off
 	 */