Diffstat (limited to 'fs')
92 files changed, 2611 insertions, 1657 deletions
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index e95d1b64082c..022574202749 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt | |||
@@ -33,7 +33,7 @@ config ARCH_BINFMT_ELF_RANDOMIZE_PIE | |||
33 | config BINFMT_ELF_FDPIC | 33 | config BINFMT_ELF_FDPIC |
34 | bool "Kernel support for FDPIC ELF binaries" | 34 | bool "Kernel support for FDPIC ELF binaries" |
35 | default y | 35 | default y |
36 | depends on (FRV || BLACKFIN || (SUPERH32 && !MMU)) | 36 | depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X) |
37 | help | 37 | help |
38 | ELF FDPIC binaries are based on ELF, but allow the individual load | 38 | ELF FDPIC binaries are based on ELF, but allow the individual load |
39 | segments of a binary to be located in memory independently of each | 39 | segments of a binary to be located in memory independently of each |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -1456,6 +1456,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) | |||
1456 | if (ret < 0) | 1456 | if (ret < 0) |
1457 | goto out; | 1457 | goto out; |
1458 | 1458 | ||
1459 | ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret); | ||
1460 | if (ret < 0) | ||
1461 | goto out; | ||
1462 | |||
1459 | kiocb->ki_nr_segs = kiocb->ki_nbytes; | 1463 | kiocb->ki_nr_segs = kiocb->ki_nbytes; |
1460 | kiocb->ki_cur_seg = 0; | 1464 | kiocb->ki_cur_seg = 0; |
1461 | /* ki_nbytes/left now reflect bytes instead of segs */ | 1465 | /* ki_nbytes/left now reflect bytes instead of segs */ |
@@ -1467,11 +1471,17 @@ out: | |||
1467 | return ret; | 1471 | return ret; |
1468 | } | 1472 | } |
1469 | 1473 | ||
1470 | static ssize_t aio_setup_single_vector(struct kiocb *kiocb) | 1474 | static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb) |
1471 | { | 1475 | { |
1476 | int bytes; | ||
1477 | |||
1478 | bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left); | ||
1479 | if (bytes < 0) | ||
1480 | return bytes; | ||
1481 | |||
1472 | kiocb->ki_iovec = &kiocb->ki_inline_vec; | 1482 | kiocb->ki_iovec = &kiocb->ki_inline_vec; |
1473 | kiocb->ki_iovec->iov_base = kiocb->ki_buf; | 1483 | kiocb->ki_iovec->iov_base = kiocb->ki_buf; |
1474 | kiocb->ki_iovec->iov_len = kiocb->ki_left; | 1484 | kiocb->ki_iovec->iov_len = bytes; |
1475 | kiocb->ki_nr_segs = 1; | 1485 | kiocb->ki_nr_segs = 1; |
1476 | kiocb->ki_cur_seg = 0; | 1486 | kiocb->ki_cur_seg = 0; |
1477 | return 0; | 1487 | return 0; |
@@ -1496,10 +1506,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) | |||
1496 | if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, | 1506 | if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, |
1497 | kiocb->ki_left))) | 1507 | kiocb->ki_left))) |
1498 | break; | 1508 | break; |
1499 | ret = security_file_permission(file, MAY_READ); | 1509 | ret = aio_setup_single_vector(READ, file, kiocb); |
1500 | if (unlikely(ret)) | ||
1501 | break; | ||
1502 | ret = aio_setup_single_vector(kiocb); | ||
1503 | if (ret) | 1510 | if (ret) |
1504 | break; | 1511 | break; |
1505 | ret = -EINVAL; | 1512 | ret = -EINVAL; |
@@ -1514,10 +1521,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) | |||
1514 | if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, | 1521 | if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, |
1515 | kiocb->ki_left))) | 1522 | kiocb->ki_left))) |
1516 | break; | 1523 | break; |
1517 | ret = security_file_permission(file, MAY_WRITE); | 1524 | ret = aio_setup_single_vector(WRITE, file, kiocb); |
1518 | if (unlikely(ret)) | ||
1519 | break; | ||
1520 | ret = aio_setup_single_vector(kiocb); | ||
1521 | if (ret) | 1525 | if (ret) |
1522 | break; | 1526 | break; |
1523 | ret = -EINVAL; | 1527 | ret = -EINVAL; |
@@ -1528,9 +1532,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) | |||
1528 | ret = -EBADF; | 1532 | ret = -EBADF; |
1529 | if (unlikely(!(file->f_mode & FMODE_READ))) | 1533 | if (unlikely(!(file->f_mode & FMODE_READ))) |
1530 | break; | 1534 | break; |
1531 | ret = security_file_permission(file, MAY_READ); | ||
1532 | if (unlikely(ret)) | ||
1533 | break; | ||
1534 | ret = aio_setup_vectored_rw(READ, kiocb, compat); | 1535 | ret = aio_setup_vectored_rw(READ, kiocb, compat); |
1535 | if (ret) | 1536 | if (ret) |
1536 | break; | 1537 | break; |
@@ -1542,9 +1543,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) | |||
1542 | ret = -EBADF; | 1543 | ret = -EBADF; |
1543 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | 1544 | if (unlikely(!(file->f_mode & FMODE_WRITE))) |
1544 | break; | 1545 | break; |
1545 | ret = security_file_permission(file, MAY_WRITE); | ||
1546 | if (unlikely(ret)) | ||
1547 | break; | ||
1548 | ret = aio_setup_vectored_rw(WRITE, kiocb, compat); | 1546 | ret = aio_setup_vectored_rw(WRITE, kiocb, compat); |
1549 | if (ret) | 1547 | if (ret) |
1550 | break; | 1548 | break; |
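The fs/aio.c hunks above drop the explicit security_file_permission() calls because rw_verify_area(), now invoked from the setup helpers, already performs that LSM permission check alongside the offset/length validation and hands back the byte count that may actually be used. A rough userspace sketch of this "one validating helper" pattern; the names (verify_area, setup_single_vector) and the clamp-to-device-size behaviour are illustrative, not the kernel's exact logic:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures, not the real layouts. */
struct file  { int may_read, may_write; long long size; };
struct kiocb { struct file *filp; long long pos; size_t left, nbytes; };

/* One helper that both checks permission and bounds the byte count,
 * mirroring the role rw_verify_area() plays in the hunks above. */
static long verify_area(int is_write, struct file *f, long long pos, size_t count)
{
    if (pos < 0)
        return -EINVAL;
    if (is_write ? !f->may_write : !f->may_read)
        return -EACCES;                     /* permission check folded in */
    if (pos >= f->size)
        return 0;
    if (pos + (long long)count > f->size)
        count = (size_t)(f->size - pos);    /* clamp to what is addressable */
    return (long)count;
}

static long setup_single_vector(int is_write, struct kiocb *kiocb)
{
    long bytes = verify_area(is_write, kiocb->filp, kiocb->pos, kiocb->left);
    if (bytes < 0)
        return bytes;                       /* no separate security call needed */
    kiocb->nbytes = (size_t)bytes;
    return 0;
}

int main(void)
{
    struct file f = { .may_read = 1, .may_write = 0, .size = 100 };
    struct kiocb io = { .filp = &f, .pos = 90, .left = 64 };
    long r = setup_single_vector(0, &io);

    printf("read setup: %ld, nbytes=%zu\n", r, io.nbytes);
    printf("write setup: %ld\n", setup_single_vector(1, &io));
    return 0;
}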
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -505,9 +505,14 @@ EXPORT_SYMBOL(bio_clone); | |||
505 | int bio_get_nr_vecs(struct block_device *bdev) | 505 | int bio_get_nr_vecs(struct block_device *bdev) |
506 | { | 506 | { |
507 | struct request_queue *q = bdev_get_queue(bdev); | 507 | struct request_queue *q = bdev_get_queue(bdev); |
508 | return min_t(unsigned, | 508 | int nr_pages; |
509 | |||
510 | nr_pages = min_t(unsigned, | ||
509 | queue_max_segments(q), | 511 | queue_max_segments(q), |
510 | queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1); | 512 | queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1); |
513 | |||
514 | return min_t(unsigned, nr_pages, BIO_MAX_PAGES); | ||
515 | |||
511 | } | 516 | } |
512 | EXPORT_SYMBOL(bio_get_nr_vecs); | 517 | EXPORT_SYMBOL(bio_get_nr_vecs); |
513 | 518 | ||
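bio_get_nr_vecs() above is additionally capped at BIO_MAX_PAGES, so callers can never be told to build a bio with more page vectors than a single bio can hold. The change amounts to a three-way minimum; a trivial standalone illustration with made-up queue limits:

#include <stdio.h>

#define BIO_MAX_PAGES 256       /* upper bound a single bio can carry */

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

/* Hypothetical queue limits standing in for queue_max_segments() and
 * queue_max_sectors(); the point is only the final three-way clamp. */
static unsigned nr_vecs(unsigned max_segments, unsigned max_sectors,
                        unsigned page_sectors)
{
    unsigned nr_pages = min_u(max_segments, max_sectors / page_sectors + 1);

    return min_u(nr_pages, BIO_MAX_PAGES);
}

int main(void)
{
    /* A queue advertising huge limits still gets capped at 256. */
    printf("%u\n", nr_vecs(4096, 1u << 20, 8));
    return 0;
}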
diff --git a/fs/block_dev.c b/fs/block_dev.c index e08f6a20a5bb..ba11c30f302d 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -70,7 +70,7 @@ static void bdev_inode_switch_bdi(struct inode *inode, | |||
70 | spin_unlock(&dst->wb.list_lock); | 70 | spin_unlock(&dst->wb.list_lock); |
71 | } | 71 | } |
72 | 72 | ||
73 | static sector_t max_block(struct block_device *bdev) | 73 | sector_t blkdev_max_block(struct block_device *bdev) |
74 | { | 74 | { |
75 | sector_t retval = ~((sector_t)0); | 75 | sector_t retval = ~((sector_t)0); |
76 | loff_t sz = i_size_read(bdev->bd_inode); | 76 | loff_t sz = i_size_read(bdev->bd_inode); |
@@ -163,7 +163,7 @@ static int | |||
163 | blkdev_get_block(struct inode *inode, sector_t iblock, | 163 | blkdev_get_block(struct inode *inode, sector_t iblock, |
164 | struct buffer_head *bh, int create) | 164 | struct buffer_head *bh, int create) |
165 | { | 165 | { |
166 | if (iblock >= max_block(I_BDEV(inode))) { | 166 | if (iblock >= blkdev_max_block(I_BDEV(inode))) { |
167 | if (create) | 167 | if (create) |
168 | return -EIO; | 168 | return -EIO; |
169 | 169 | ||
@@ -185,7 +185,7 @@ static int | |||
185 | blkdev_get_blocks(struct inode *inode, sector_t iblock, | 185 | blkdev_get_blocks(struct inode *inode, sector_t iblock, |
186 | struct buffer_head *bh, int create) | 186 | struct buffer_head *bh, int create) |
187 | { | 187 | { |
188 | sector_t end_block = max_block(I_BDEV(inode)); | 188 | sector_t end_block = blkdev_max_block(I_BDEV(inode)); |
189 | unsigned long max_blocks = bh->b_size >> inode->i_blkbits; | 189 | unsigned long max_blocks = bh->b_size >> inode->i_blkbits; |
190 | 190 | ||
191 | if ((iblock + max_blocks) > end_block) { | 191 | if ((iblock + max_blocks) > end_block) { |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a7ffc88a7dbe..e1fe74a2ce16 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -2753,7 +2753,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
2753 | * one reference for us, and we leave it for the | 2753 | * one reference for us, and we leave it for the |
2754 | * caller | 2754 | * caller |
2755 | */ | 2755 | */ |
2756 | device->flush_bio = NULL;; | 2756 | device->flush_bio = NULL; |
2757 | bio = bio_alloc(GFP_NOFS, 0); | 2757 | bio = bio_alloc(GFP_NOFS, 0); |
2758 | if (!bio) | 2758 | if (!bio) |
2759 | return -ENOMEM; | 2759 | return -ENOMEM; |
diff --git a/fs/buffer.c b/fs/buffer.c index 351e18ea2e53..ad5938ca357c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -921,6 +921,7 @@ init_page_buffers(struct page *page, struct block_device *bdev, | |||
921 | struct buffer_head *head = page_buffers(page); | 921 | struct buffer_head *head = page_buffers(page); |
922 | struct buffer_head *bh = head; | 922 | struct buffer_head *bh = head; |
923 | int uptodate = PageUptodate(page); | 923 | int uptodate = PageUptodate(page); |
924 | sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode)); | ||
924 | 925 | ||
925 | do { | 926 | do { |
926 | if (!buffer_mapped(bh)) { | 927 | if (!buffer_mapped(bh)) { |
@@ -929,7 +930,8 @@ init_page_buffers(struct page *page, struct block_device *bdev, | |||
929 | bh->b_blocknr = block; | 930 | bh->b_blocknr = block; |
930 | if (uptodate) | 931 | if (uptodate) |
931 | set_buffer_uptodate(bh); | 932 | set_buffer_uptodate(bh); |
932 | set_buffer_mapped(bh); | 933 | if (block < end_block) |
934 | set_buffer_mapped(bh); | ||
933 | } | 935 | } |
934 | block++; | 936 | block++; |
935 | bh = bh->b_this_page; | 937 | bh = bh->b_this_page; |
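The fs/block_dev.c rename makes blkdev_max_block() non-static so the fs/buffer.c hunk above can use it: init_page_buffers() now marks a buffer mapped only while its block number is still inside the device, instead of unconditionally. A simplified userspace sketch of both halves (the real function also returns the largest possible sector_t for a zero-sized device, as the context lines show):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Number of blocks the device can address: size >> blkbits, or "no limit"
 * when the size is zero (mirrors the lines shown in the block_dev.c hunk). */
static sector_t max_block(long long dev_size, unsigned blkbits)
{
    sector_t retval = ~(sector_t)0;

    if (dev_size)
        retval = (sector_t)dev_size >> blkbits;
    return retval;
}

/* The init_page_buffers() change in a nutshell: walk the buffers of a page
 * and only mark a block "mapped" while it is still inside the device. */
static void map_page_blocks(sector_t first_block, int blocks_per_page,
                            sector_t end_block)
{
    sector_t block = first_block;

    for (int i = 0; i < blocks_per_page; i++, block++)
        printf("block %llu: %s\n", (unsigned long long)block,
               block < end_block ? "mapped" : "left unmapped");
}

int main(void)
{
    /* 4 KiB blocks on a 1 MiB device -> blocks 0..255 are valid. */
    sector_t end = max_block(1 << 20, 12);

    map_page_blocks(254, 4, end);   /* last page straddles end-of-device */
    return 0;
}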
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 5dcc55197fb3..e0b56d7a19c5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -164,7 +164,8 @@ static const match_table_t cifs_mount_option_tokens = { | |||
164 | { Opt_sign, "sign" }, | 164 | { Opt_sign, "sign" }, |
165 | { Opt_seal, "seal" }, | 165 | { Opt_seal, "seal" }, |
166 | { Opt_direct, "direct" }, | 166 | { Opt_direct, "direct" }, |
167 | { Opt_direct, "forceddirectio" }, | 167 | { Opt_direct, "directio" }, |
168 | { Opt_direct, "forcedirectio" }, | ||
168 | { Opt_strictcache, "strictcache" }, | 169 | { Opt_strictcache, "strictcache" }, |
169 | { Opt_noac, "noac" }, | 170 | { Opt_noac, "noac" }, |
170 | { Opt_fsc, "fsc" }, | 171 | { Opt_fsc, "fsc" }, |
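The cifs hunk swaps the misspelled "forceddirectio" token for the two spellings users actually pass, "directio" and "forcedirectio", both resolving to Opt_direct. The match table is a string-to-token lookup in which several strings may share one token; a small sketch of that idea, simplified to exact string matching rather than the kernel's pattern-capable match_token():

#include <stdio.h>
#include <string.h>

enum { Opt_sign, Opt_seal, Opt_direct, Opt_err };

struct match_entry { int token; const char *pattern; };

/* Several aliases can map to the same token, as "direct", "directio" and
 * "forcedirectio" all do after this patch. */
static const struct match_entry mount_tokens[] = {
    { Opt_sign,   "sign" },
    { Opt_seal,   "seal" },
    { Opt_direct, "direct" },
    { Opt_direct, "directio" },
    { Opt_direct, "forcedirectio" },
    { Opt_err,    NULL }
};

static int match_option(const char *opt)
{
    for (const struct match_entry *m = mount_tokens; m->pattern; m++)
        if (!strcmp(opt, m->pattern))
            return m->token;
    return Opt_err;
}

int main(void)
{
    printf("%d %d %d\n", match_option("directio"),
           match_option("forcedirectio"), match_option("forceddirectio"));
    return 0;
}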
diff --git a/fs/dcache.c b/fs/dcache.c index b80531c91779..8c1ab8fb5012 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -153,16 +153,12 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, | |||
153 | * In contrast, 'ct' and 'tcount' can be from a pathname, and do | 153 | * In contrast, 'ct' and 'tcount' can be from a pathname, and do |
154 | * need the careful unaligned handling. | 154 | * need the careful unaligned handling. |
155 | */ | 155 | */ |
156 | static inline int dentry_cmp(const unsigned char *cs, size_t scount, | 156 | static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) |
157 | const unsigned char *ct, size_t tcount) | ||
158 | { | 157 | { |
159 | unsigned long a,b,mask; | 158 | unsigned long a,b,mask; |
160 | 159 | ||
161 | if (unlikely(scount != tcount)) | ||
162 | return 1; | ||
163 | |||
164 | for (;;) { | 160 | for (;;) { |
165 | a = load_unaligned_zeropad(cs); | 161 | a = *(unsigned long *)cs; |
166 | b = load_unaligned_zeropad(ct); | 162 | b = load_unaligned_zeropad(ct); |
167 | if (tcount < sizeof(unsigned long)) | 163 | if (tcount < sizeof(unsigned long)) |
168 | break; | 164 | break; |
@@ -180,12 +176,8 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount, | |||
180 | 176 | ||
181 | #else | 177 | #else |
182 | 178 | ||
183 | static inline int dentry_cmp(const unsigned char *cs, size_t scount, | 179 | static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) |
184 | const unsigned char *ct, size_t tcount) | ||
185 | { | 180 | { |
186 | if (scount != tcount) | ||
187 | return 1; | ||
188 | |||
189 | do { | 181 | do { |
190 | if (*cs != *ct) | 182 | if (*cs != *ct) |
191 | return 1; | 183 | return 1; |
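dentry_string_cmp() above compares the name one unsigned long at a time: full words are compared directly and the final partial word is masked so bytes beyond tcount are ignored. The kernel version relies on load_unaligned_zeropad(), which may read past the end of the source buffer and is backed by an architecture-specific fault fixup, so a portable rendering has to stand that in with memcpy; the tail mask below also assumes a little-endian layout:

#include <stdio.h>
#include <string.h>

/* Word-at-a-time comparison over exactly 'tcount' bytes (tcount > 0).
 * Returns 0 on match, 1 on mismatch, same convention as dentry_string_cmp(). */
static int word_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
    unsigned long a, b, mask;

    for (;;) {
        /* memcpy stands in for the kernel's unaligned / zero-padded loads. */
        memcpy(&a, cs, sizeof(a));
        memcpy(&b, ct, sizeof(b));
        if (tcount < sizeof(unsigned long))
            break;
        if (a != b)
            return 1;
        cs += sizeof(unsigned long);
        ct += sizeof(unsigned long);
        tcount -= sizeof(unsigned long);
        if (!tcount)
            return 0;
    }
    /* Keep only the low 'tcount' bytes of the final word (little-endian). */
    mask = ~0ul >> (8 * (sizeof(unsigned long) - tcount));
    return ((a ^ b) & mask) != 0;
}

int main(void)
{
    /* Buffers padded to a full word so the last fixed-size load stays in
     * bounds; the kernel relies on NUL termination plus its zero-padding
     * load instead. */
    unsigned char a[16] = "foobarbazqux";
    unsigned char b[16] = "foobarbazquX";

    printf("%d %d\n", word_cmp(a, b, 11), word_cmp(a, b, 12));  /* 0 1 */
    return 0;
}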
@@ -198,6 +190,30 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount, | |||
198 | 190 | ||
199 | #endif | 191 | #endif |
200 | 192 | ||
193 | static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount) | ||
194 | { | ||
195 | const unsigned char *cs; | ||
196 | /* | ||
197 | * Be careful about RCU walk racing with rename: | ||
198 | * use ACCESS_ONCE to fetch the name pointer. | ||
199 | * | ||
200 | * NOTE! Even if a rename will mean that the length | ||
201 | * was not loaded atomically, we don't care. The | ||
202 | * RCU walk will check the sequence count eventually, | ||
203 | * and catch it. And we won't overrun the buffer, | ||
204 | * because we're reading the name pointer atomically, | ||
205 | * and a dentry name is guaranteed to be properly | ||
206 | * terminated with a NUL byte. | ||
207 | * | ||
208 | * End result: even if 'len' is wrong, we'll exit | ||
209 | * early because the data cannot match (there can | ||
210 | * be no NUL in the ct/tcount data) | ||
211 | */ | ||
212 | cs = ACCESS_ONCE(dentry->d_name.name); | ||
213 | smp_read_barrier_depends(); | ||
214 | return dentry_string_cmp(cs, ct, tcount); | ||
215 | } | ||
216 | |||
201 | static void __d_free(struct rcu_head *head) | 217 | static void __d_free(struct rcu_head *head) |
202 | { | 218 | { |
203 | struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); | 219 | struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); |
@@ -1258,6 +1274,13 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) | |||
1258 | if (!dentry) | 1274 | if (!dentry) |
1259 | return NULL; | 1275 | return NULL; |
1260 | 1276 | ||
1277 | /* | ||
1278 | * We guarantee that the inline name is always NUL-terminated. | ||
1279 | * This way the memcpy() done by the name switching in rename | ||
1280 | * will still always have a NUL at the end, even if we might | ||
1281 | * be overwriting an internal NUL character | ||
1282 | */ | ||
1283 | dentry->d_iname[DNAME_INLINE_LEN-1] = 0; | ||
1261 | if (name->len > DNAME_INLINE_LEN-1) { | 1284 | if (name->len > DNAME_INLINE_LEN-1) { |
1262 | dname = kmalloc(name->len + 1, GFP_KERNEL); | 1285 | dname = kmalloc(name->len + 1, GFP_KERNEL); |
1263 | if (!dname) { | 1286 | if (!dname) { |
@@ -1267,13 +1290,16 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) | |||
1267 | } else { | 1290 | } else { |
1268 | dname = dentry->d_iname; | 1291 | dname = dentry->d_iname; |
1269 | } | 1292 | } |
1270 | dentry->d_name.name = dname; | ||
1271 | 1293 | ||
1272 | dentry->d_name.len = name->len; | 1294 | dentry->d_name.len = name->len; |
1273 | dentry->d_name.hash = name->hash; | 1295 | dentry->d_name.hash = name->hash; |
1274 | memcpy(dname, name->name, name->len); | 1296 | memcpy(dname, name->name, name->len); |
1275 | dname[name->len] = 0; | 1297 | dname[name->len] = 0; |
1276 | 1298 | ||
1299 | /* Make sure we always see the terminating NUL character */ | ||
1300 | smp_wmb(); | ||
1301 | dentry->d_name.name = dname; | ||
1302 | |||
1277 | dentry->d_count = 1; | 1303 | dentry->d_count = 1; |
1278 | dentry->d_flags = 0; | 1304 | dentry->d_flags = 0; |
1279 | spin_lock_init(&dentry->d_lock); | 1305 | spin_lock_init(&dentry->d_lock); |
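Together with the new dentry_cmp() earlier, this is a publish/subscribe pattern: __d_alloc() fills in and NUL-terminates the name buffer, issues smp_wmb(), and only then stores d_name.name, while the lockless reader fetches the pointer with ACCESS_ONCE() plus smp_read_barrier_depends(). A loose C11 analogy using a release store and an acquire load (not the kernel primitives themselves):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct name_buf { char bytes[32]; };

/* The "d_name.name" slot: readers may load this at any time. */
static _Atomic(struct name_buf *) published_name;

static struct name_buf storage;

/* Writer: fill in the buffer (including the terminating NUL) first, then
 * publish the pointer with release semantics -- the analogue of
 * "memcpy + smp_wmb() + dentry->d_name.name = dname" in the hunk above. */
static void publish(const char *s)
{
    size_t len = strlen(s);

    memcpy(storage.bytes, s, len);
    storage.bytes[len] = '\0';
    atomic_store_explicit(&published_name, &storage, memory_order_release);
}

/* Reader: one atomic load of the pointer; acquire ordering guarantees the
 * NUL-terminated contents are visible, so strcmp() cannot run off the end. */
static int matches(const char *s)
{
    struct name_buf *p =
        atomic_load_explicit(&published_name, memory_order_acquire);

    return p && strcmp(p->bytes, s) == 0;
}

int main(void)
{
    publish("dentry-name");
    printf("%d %d\n", matches("dentry-name"), matches("other"));
    return 0;
}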
@@ -1439,18 +1465,18 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry, | |||
1439 | } | 1465 | } |
1440 | 1466 | ||
1441 | list_for_each_entry(alias, &inode->i_dentry, d_alias) { | 1467 | list_for_each_entry(alias, &inode->i_dentry, d_alias) { |
1442 | struct qstr *qstr = &alias->d_name; | ||
1443 | |||
1444 | /* | 1468 | /* |
1445 | * Don't need alias->d_lock here, because aliases with | 1469 | * Don't need alias->d_lock here, because aliases with |
1446 | * d_parent == entry->d_parent are not subject to name or | 1470 | * d_parent == entry->d_parent are not subject to name or |
1447 | * parent changes, because the parent inode i_mutex is held. | 1471 | * parent changes, because the parent inode i_mutex is held. |
1448 | */ | 1472 | */ |
1449 | if (qstr->hash != hash) | 1473 | if (alias->d_name.hash != hash) |
1450 | continue; | 1474 | continue; |
1451 | if (alias->d_parent != entry->d_parent) | 1475 | if (alias->d_parent != entry->d_parent) |
1452 | continue; | 1476 | continue; |
1453 | if (dentry_cmp(qstr->name, qstr->len, name, len)) | 1477 | if (alias->d_name.len != len) |
1478 | continue; | ||
1479 | if (dentry_cmp(alias, name, len)) | ||
1454 | continue; | 1480 | continue; |
1455 | __dget(alias); | 1481 | __dget(alias); |
1456 | return alias; | 1482 | return alias; |
@@ -1489,7 +1515,7 @@ struct dentry *d_make_root(struct inode *root_inode) | |||
1489 | struct dentry *res = NULL; | 1515 | struct dentry *res = NULL; |
1490 | 1516 | ||
1491 | if (root_inode) { | 1517 | if (root_inode) { |
1492 | static const struct qstr name = { .name = "/", .len = 1 }; | 1518 | static const struct qstr name = QSTR_INIT("/", 1); |
1493 | 1519 | ||
1494 | res = __d_alloc(root_inode->i_sb, &name); | 1520 | res = __d_alloc(root_inode->i_sb, &name); |
1495 | if (res) | 1521 | if (res) |
@@ -1727,6 +1753,48 @@ err_out: | |||
1727 | } | 1753 | } |
1728 | EXPORT_SYMBOL(d_add_ci); | 1754 | EXPORT_SYMBOL(d_add_ci); |
1729 | 1755 | ||
1756 | /* | ||
1757 | * Do the slow-case of the dentry name compare. | ||
1758 | * | ||
1759 | * Unlike the dentry_cmp() function, we need to atomically | ||
1760 | * load the name, length and inode information, so that the | ||
1761 | * filesystem can rely on them, and can use the 'name' and | ||
1762 | * 'len' information without worrying about walking off the | ||
1763 | * end of memory etc. | ||
1764 | * | ||
1765 | * Thus the read_seqcount_retry() and the "duplicate" info | ||
1766 | * in arguments (the low-level filesystem should not look | ||
1767 | * at the dentry inode or name contents directly, since | ||
1768 | * rename can change them while we're in RCU mode). | ||
1769 | */ | ||
1770 | enum slow_d_compare { | ||
1771 | D_COMP_OK, | ||
1772 | D_COMP_NOMATCH, | ||
1773 | D_COMP_SEQRETRY, | ||
1774 | }; | ||
1775 | |||
1776 | static noinline enum slow_d_compare slow_dentry_cmp( | ||
1777 | const struct dentry *parent, | ||
1778 | struct inode *inode, | ||
1779 | struct dentry *dentry, | ||
1780 | unsigned int seq, | ||
1781 | const struct qstr *name) | ||
1782 | { | ||
1783 | int tlen = dentry->d_name.len; | ||
1784 | const char *tname = dentry->d_name.name; | ||
1785 | struct inode *i = dentry->d_inode; | ||
1786 | |||
1787 | if (read_seqcount_retry(&dentry->d_seq, seq)) { | ||
1788 | cpu_relax(); | ||
1789 | return D_COMP_SEQRETRY; | ||
1790 | } | ||
1791 | if (parent->d_op->d_compare(parent, inode, | ||
1792 | dentry, i, | ||
1793 | tlen, tname, name)) | ||
1794 | return D_COMP_NOMATCH; | ||
1795 | return D_COMP_OK; | ||
1796 | } | ||
1797 | |||
1730 | /** | 1798 | /** |
1731 | * __d_lookup_rcu - search for a dentry (racy, store-free) | 1799 | * __d_lookup_rcu - search for a dentry (racy, store-free) |
1732 | * @parent: parent dentry | 1800 | * @parent: parent dentry |
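slow_dentry_cmp() snapshots the name, length and inode and then re-checks dentry->d_seq, asking the caller to retry if the count moved: the classic seqcount read side. (__d_lookup_rcu below deliberately starts with raw_seqcount_begin() and does not wait for a stable count, as its new comment explains.) A compact standalone sketch of the plain read/retry loop, with C11 atomics standing in for read_seqcount_begin()/read_seqcount_retry():

#include <stdatomic.h>
#include <stdio.h>

/* A writer bumps 'seq' to odd before updating and back to even afterwards;
 * a reader's snapshot is only valid if the count was even and unchanged. */
struct seq_protected {
    atomic_uint seq;
    int a, b;                   /* fields a writer updates together */
};

static unsigned read_begin(struct seq_protected *s)
{
    unsigned seq;

    /* Spin while a writer is mid-update (cpu_relax() in the kernel). */
    while ((seq = atomic_load_explicit(&s->seq, memory_order_acquire)) & 1)
        ;
    return seq;
}

static int read_retry(struct seq_protected *s, unsigned seq)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&s->seq, memory_order_relaxed) != seq;
}

static void read_pair(struct seq_protected *s, int *a, int *b)
{
    unsigned seq;

    do {
        seq = read_begin(s);
        *a = s->a;              /* snapshot, like tlen/tname/inode above */
        *b = s->b;
    } while (read_retry(s, seq));   /* the D_COMP_SEQRETRY case: try again */
}

int main(void)
{
    struct seq_protected s;
    int a, b;

    atomic_init(&s.seq, 0);
    s.a = 1;
    s.b = 2;
    read_pair(&s, &a, &b);
    printf("%d %d\n", a, b);
    return 0;
}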
@@ -1753,15 +1821,17 @@ EXPORT_SYMBOL(d_add_ci); | |||
1753 | * the returned dentry, so long as its parent's seqlock is checked after the | 1821 | * the returned dentry, so long as its parent's seqlock is checked after the |
1754 | * child is looked up. Thus, an interlocking stepping of sequence lock checks | 1822 | * child is looked up. Thus, an interlocking stepping of sequence lock checks |
1755 | * is formed, giving integrity down the path walk. | 1823 | * is formed, giving integrity down the path walk. |
1824 | * | ||
1825 | * NOTE! The caller *has* to check the resulting dentry against the sequence | ||
1826 | * number we've returned before using any of the resulting dentry state! | ||
1756 | */ | 1827 | */ |
1757 | struct dentry *__d_lookup_rcu(const struct dentry *parent, | 1828 | struct dentry *__d_lookup_rcu(const struct dentry *parent, |
1758 | const struct qstr *name, | 1829 | const struct qstr *name, |
1759 | unsigned *seqp, struct inode **inode) | 1830 | unsigned *seqp, struct inode *inode) |
1760 | { | 1831 | { |
1761 | unsigned int len = name->len; | 1832 | u64 hashlen = name->hash_len; |
1762 | unsigned int hash = name->hash; | ||
1763 | const unsigned char *str = name->name; | 1833 | const unsigned char *str = name->name; |
1764 | struct hlist_bl_head *b = d_hash(parent, hash); | 1834 | struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen)); |
1765 | struct hlist_bl_node *node; | 1835 | struct hlist_bl_node *node; |
1766 | struct dentry *dentry; | 1836 | struct dentry *dentry; |
1767 | 1837 | ||
@@ -1787,49 +1857,47 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent, | |||
1787 | */ | 1857 | */ |
1788 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { | 1858 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { |
1789 | unsigned seq; | 1859 | unsigned seq; |
1790 | struct inode *i; | ||
1791 | const char *tname; | ||
1792 | int tlen; | ||
1793 | |||
1794 | if (dentry->d_name.hash != hash) | ||
1795 | continue; | ||
1796 | 1860 | ||
1797 | seqretry: | 1861 | seqretry: |
1798 | seq = read_seqcount_begin(&dentry->d_seq); | 1862 | /* |
1863 | * The dentry sequence count protects us from concurrent | ||
1864 | * renames, and thus protects inode, parent and name fields. | ||
1865 | * | ||
1866 | * The caller must perform a seqcount check in order | ||
1867 | * to do anything useful with the returned dentry, | ||
1868 | * including using the 'd_inode' pointer. | ||
1869 | * | ||
1870 | * NOTE! We do a "raw" seqcount_begin here. That means that | ||
1871 | * we don't wait for the sequence count to stabilize if it | ||
1872 | * is in the middle of a sequence change. If we do the slow | ||
1873 | * dentry compare, we will do seqretries until it is stable, | ||
1874 | * and if we end up with a successful lookup, we actually | ||
1875 | * want to exit RCU lookup anyway. | ||
1876 | */ | ||
1877 | seq = raw_seqcount_begin(&dentry->d_seq); | ||
1799 | if (dentry->d_parent != parent) | 1878 | if (dentry->d_parent != parent) |
1800 | continue; | 1879 | continue; |
1801 | if (d_unhashed(dentry)) | 1880 | if (d_unhashed(dentry)) |
1802 | continue; | 1881 | continue; |
1803 | tlen = dentry->d_name.len; | 1882 | *seqp = seq; |
1804 | tname = dentry->d_name.name; | 1883 | |
1805 | i = dentry->d_inode; | ||
1806 | prefetch(tname); | ||
1807 | /* | ||
1808 | * This seqcount check is required to ensure name and | ||
1809 | * len are loaded atomically, so as not to walk off the | ||
1810 | * edge of memory when walking. If we could load this | ||
1811 | * atomically some other way, we could drop this check. | ||
1812 | */ | ||
1813 | if (read_seqcount_retry(&dentry->d_seq, seq)) | ||
1814 | goto seqretry; | ||
1815 | if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { | 1884 | if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { |
1816 | if (parent->d_op->d_compare(parent, *inode, | 1885 | if (dentry->d_name.hash != hashlen_hash(hashlen)) |
1817 | dentry, i, | ||
1818 | tlen, tname, name)) | ||
1819 | continue; | 1886 | continue; |
1820 | } else { | 1887 | switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) { |
1821 | if (dentry_cmp(tname, tlen, str, len)) | 1888 | case D_COMP_OK: |
1889 | return dentry; | ||
1890 | case D_COMP_NOMATCH: | ||
1822 | continue; | 1891 | continue; |
1892 | default: | ||
1893 | goto seqretry; | ||
1894 | } | ||
1823 | } | 1895 | } |
1824 | /* | 1896 | |
1825 | * No extra seqcount check is required after the name | 1897 | if (dentry->d_name.hash_len != hashlen) |
1826 | * compare. The caller must perform a seqcount check in | 1898 | continue; |
1827 | * order to do anything useful with the returned dentry | 1899 | if (!dentry_cmp(dentry, str, hashlen_len(hashlen))) |
1828 | * anyway. | 1900 | return dentry; |
1829 | */ | ||
1830 | *seqp = seq; | ||
1831 | *inode = i; | ||
1832 | return dentry; | ||
1833 | } | 1901 | } |
1834 | return NULL; | 1902 | return NULL; |
1835 | } | 1903 | } |
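__d_lookup_rcu() now takes name->hash_len, a single 64-bit value carrying both the hash and the length, so the common case needs one compare against dentry->d_name.hash_len before falling through to dentry_cmp(). A sketch of that packing; the helper names mirror hashlen_hash()/hashlen_len(), but the exact bit layout here is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

/* Pack a 32-bit hash and a 32-bit length into one word; split them back out. */
static inline uint64_t hashlen_create(uint32_t hash, uint32_t len)
{
    return ((uint64_t)len << 32) | hash;
}
static inline uint32_t hashlen_hash(uint64_t hashlen) { return (uint32_t)hashlen; }
static inline uint32_t hashlen_len(uint64_t hashlen)  { return (uint32_t)(hashlen >> 32); }

struct entry { uint64_t hash_len; const char *name; };

/* One 64-bit compare rejects entries that differ in either hash or length. */
static int quick_match(const struct entry *e, uint64_t wanted)
{
    return e->hash_len == wanted;
}

int main(void)
{
    struct entry e = { hashlen_create(0xdeadbeef, 5), "hello" };

    printf("%d %d\n",
           quick_match(&e, hashlen_create(0xdeadbeef, 5)),   /* 1 */
           quick_match(&e, hashlen_create(0xdeadbeef, 6)));  /* 0 */
    return 0;
}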
@@ -1908,8 +1976,6 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name) | |||
1908 | rcu_read_lock(); | 1976 | rcu_read_lock(); |
1909 | 1977 | ||
1910 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { | 1978 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { |
1911 | const char *tname; | ||
1912 | int tlen; | ||
1913 | 1979 | ||
1914 | if (dentry->d_name.hash != hash) | 1980 | if (dentry->d_name.hash != hash) |
1915 | continue; | 1981 | continue; |
@@ -1924,15 +1990,17 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name) | |||
1924 | * It is safe to compare names since d_move() cannot | 1990 | * It is safe to compare names since d_move() cannot |
1925 | * change the qstr (protected by d_lock). | 1991 | * change the qstr (protected by d_lock). |
1926 | */ | 1992 | */ |
1927 | tlen = dentry->d_name.len; | ||
1928 | tname = dentry->d_name.name; | ||
1929 | if (parent->d_flags & DCACHE_OP_COMPARE) { | 1993 | if (parent->d_flags & DCACHE_OP_COMPARE) { |
1994 | int tlen = dentry->d_name.len; | ||
1995 | const char *tname = dentry->d_name.name; | ||
1930 | if (parent->d_op->d_compare(parent, parent->d_inode, | 1996 | if (parent->d_op->d_compare(parent, parent->d_inode, |
1931 | dentry, dentry->d_inode, | 1997 | dentry, dentry->d_inode, |
1932 | tlen, tname, name)) | 1998 | tlen, tname, name)) |
1933 | goto next; | 1999 | goto next; |
1934 | } else { | 2000 | } else { |
1935 | if (dentry_cmp(tname, tlen, str, len)) | 2001 | if (dentry->d_name.len != len) |
2002 | goto next; | ||
2003 | if (dentry_cmp(dentry, str, len)) | ||
1936 | goto next; | 2004 | goto next; |
1937 | } | 2005 | } |
1938 | 2006 | ||
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 90e5997262ea..63dc19c54d5a 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c | |||
@@ -310,6 +310,7 @@ void dlm_callback_resume(struct dlm_ls *ls) | |||
310 | } | 310 | } |
311 | mutex_unlock(&ls->ls_cb_mutex); | 311 | mutex_unlock(&ls->ls_cb_mutex); |
312 | 312 | ||
313 | log_debug(ls, "dlm_callback_resume %d", count); | 313 | if (count) |
314 | log_debug(ls, "dlm_callback_resume %d", count); | ||
314 | } | 315 | } |
315 | 316 | ||
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 3a564d197e99..bc342f7ac3af 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/miscdevice.h> | 38 | #include <linux/miscdevice.h> |
39 | #include <linux/mutex.h> | 39 | #include <linux/mutex.h> |
40 | #include <linux/idr.h> | 40 | #include <linux/idr.h> |
41 | #include <linux/ratelimit.h> | ||
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
42 | 43 | ||
43 | #include <linux/dlm.h> | 44 | #include <linux/dlm.h> |
@@ -74,6 +75,13 @@ do { \ | |||
74 | (ls)->ls_name , ##args); \ | 75 | (ls)->ls_name , ##args); \ |
75 | } while (0) | 76 | } while (0) |
76 | 77 | ||
78 | #define log_limit(ls, fmt, args...) \ | ||
79 | do { \ | ||
80 | if (dlm_config.ci_log_debug) \ | ||
81 | printk_ratelimited(KERN_DEBUG "dlm: %s: " fmt "\n", \ | ||
82 | (ls)->ls_name , ##args); \ | ||
83 | } while (0) | ||
84 | |||
77 | #define DLM_ASSERT(x, do) \ | 85 | #define DLM_ASSERT(x, do) \ |
78 | { \ | 86 | { \ |
79 | if (!(x)) \ | 87 | if (!(x)) \ |
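The new log_limit() macro wraps printk_ratelimited() so a chatty debug path cannot flood the log. The underlying idea is an interval/burst limiter; a minimal userspace approximation with invented parameters (printk_ratelimited() keeps equivalent per-callsite state in the kernel):

#include <stdio.h>
#include <time.h>

/* Allow at most 'burst' messages per 'interval' seconds; drop the rest. */
struct ratelimit { time_t window_start; int interval, burst, printed; };

static int ratelimit_ok(struct ratelimit *rl)
{
    time_t now = time(NULL);

    if (now - rl->window_start >= rl->interval) {
        rl->window_start = now;     /* new window: reset the budget */
        rl->printed = 0;
    }
    if (rl->printed >= rl->burst)
        return 0;                   /* over budget: suppress */
    rl->printed++;
    return 1;
}

#define log_limit(rl, fmt, ...) \
    do { if (ratelimit_ok(rl)) printf("dlm: " fmt "\n", ##__VA_ARGS__); } while (0)

int main(void)
{
    struct ratelimit rl = { .interval = 5, .burst = 3 };

    for (int i = 0; i < 10; i++)
        log_limit(&rl, "message %d", i);    /* only the first 3 appear */
    return 0;
}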
@@ -263,6 +271,8 @@ struct dlm_lkb { | |||
263 | ktime_t lkb_last_cast_time; /* for debugging */ | 271 | ktime_t lkb_last_cast_time; /* for debugging */ |
264 | ktime_t lkb_last_bast_time; /* for debugging */ | 272 | ktime_t lkb_last_bast_time; /* for debugging */ |
265 | 273 | ||
274 | uint64_t lkb_recover_seq; /* from ls_recover_seq */ | ||
275 | |||
266 | char *lkb_lvbptr; | 276 | char *lkb_lvbptr; |
267 | struct dlm_lksb *lkb_lksb; /* caller's status block */ | 277 | struct dlm_lksb *lkb_lksb; /* caller's status block */ |
268 | void (*lkb_astfn) (void *astparam); | 278 | void (*lkb_astfn) (void *astparam); |
@@ -317,7 +327,7 @@ enum rsb_flags { | |||
317 | RSB_NEW_MASTER, | 327 | RSB_NEW_MASTER, |
318 | RSB_NEW_MASTER2, | 328 | RSB_NEW_MASTER2, |
319 | RSB_RECOVER_CONVERT, | 329 | RSB_RECOVER_CONVERT, |
320 | RSB_LOCKS_PURGED, | 330 | RSB_RECOVER_GRANT, |
321 | }; | 331 | }; |
322 | 332 | ||
323 | static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag) | 333 | static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag) |
@@ -563,6 +573,7 @@ struct dlm_ls { | |||
563 | struct mutex ls_requestqueue_mutex; | 573 | struct mutex ls_requestqueue_mutex; |
564 | struct dlm_rcom *ls_recover_buf; | 574 | struct dlm_rcom *ls_recover_buf; |
565 | int ls_recover_nodeid; /* for debugging */ | 575 | int ls_recover_nodeid; /* for debugging */ |
576 | unsigned int ls_recover_locks_in; /* for log info */ | ||
566 | uint64_t ls_rcom_seq; | 577 | uint64_t ls_rcom_seq; |
567 | spinlock_t ls_rcom_spin; | 578 | spinlock_t ls_rcom_spin; |
568 | struct list_head ls_recover_list; | 579 | struct list_head ls_recover_list; |
@@ -589,6 +600,7 @@ struct dlm_ls { | |||
589 | #define LSFL_UEVENT_WAIT 5 | 600 | #define LSFL_UEVENT_WAIT 5 |
590 | #define LSFL_TIMEWARN 6 | 601 | #define LSFL_TIMEWARN 6 |
591 | #define LSFL_CB_DELAY 7 | 602 | #define LSFL_CB_DELAY 7 |
603 | #define LSFL_NODIR 8 | ||
592 | 604 | ||
593 | /* much of this is just saving user space pointers associated with the | 605 | /* much of this is just saving user space pointers associated with the |
594 | lock that we pass back to the user lib with an ast */ | 606 | lock that we pass back to the user lib with an ast */ |
@@ -636,7 +648,7 @@ static inline int dlm_recovery_stopped(struct dlm_ls *ls) | |||
636 | 648 | ||
637 | static inline int dlm_no_directory(struct dlm_ls *ls) | 649 | static inline int dlm_no_directory(struct dlm_ls *ls) |
638 | { | 650 | { |
639 | return (ls->ls_exflags & DLM_LSFL_NODIR) ? 1 : 0; | 651 | return test_bit(LSFL_NODIR, &ls->ls_flags); |
640 | } | 652 | } |
641 | 653 | ||
642 | int dlm_netlink_init(void); | 654 | int dlm_netlink_init(void); |
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 4c58d4a3adc4..bdafb65a5234 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c | |||
@@ -160,11 +160,12 @@ static const int __quecvt_compat_matrix[8][8] = { | |||
160 | 160 | ||
161 | void dlm_print_lkb(struct dlm_lkb *lkb) | 161 | void dlm_print_lkb(struct dlm_lkb *lkb) |
162 | { | 162 | { |
163 | printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n" | 163 | printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x " |
164 | " status %d rqmode %d grmode %d wait_type %d\n", | 164 | "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n", |
165 | lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags, | 165 | lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags, |
166 | lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode, | 166 | lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode, |
167 | lkb->lkb_grmode, lkb->lkb_wait_type); | 167 | lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid, |
168 | (unsigned long long)lkb->lkb_recover_seq); | ||
168 | } | 169 | } |
169 | 170 | ||
170 | static void dlm_print_rsb(struct dlm_rsb *r) | 171 | static void dlm_print_rsb(struct dlm_rsb *r) |
@@ -251,8 +252,6 @@ static inline int is_process_copy(struct dlm_lkb *lkb) | |||
251 | 252 | ||
252 | static inline int is_master_copy(struct dlm_lkb *lkb) | 253 | static inline int is_master_copy(struct dlm_lkb *lkb) |
253 | { | 254 | { |
254 | if (lkb->lkb_flags & DLM_IFL_MSTCPY) | ||
255 | DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb);); | ||
256 | return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0; | 255 | return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0; |
257 | } | 256 | } |
258 | 257 | ||
@@ -479,6 +478,9 @@ static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b, | |||
479 | kref_get(&r->res_ref); | 478 | kref_get(&r->res_ref); |
480 | goto out; | 479 | goto out; |
481 | } | 480 | } |
481 | if (error == -ENOTBLK) | ||
482 | goto out; | ||
483 | |||
482 | error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, flags, &r); | 484 | error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, flags, &r); |
483 | if (error) | 485 | if (error) |
484 | goto out; | 486 | goto out; |
@@ -586,6 +588,23 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen, | |||
586 | return error; | 588 | return error; |
587 | } | 589 | } |
588 | 590 | ||
591 | static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) | ||
592 | { | ||
593 | struct rb_node *n; | ||
594 | struct dlm_rsb *r; | ||
595 | int i; | ||
596 | |||
597 | for (i = 0; i < ls->ls_rsbtbl_size; i++) { | ||
598 | spin_lock(&ls->ls_rsbtbl[i].lock); | ||
599 | for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { | ||
600 | r = rb_entry(n, struct dlm_rsb, res_hashnode); | ||
601 | if (r->res_hash == hash) | ||
602 | dlm_dump_rsb(r); | ||
603 | } | ||
604 | spin_unlock(&ls->ls_rsbtbl[i].lock); | ||
605 | } | ||
606 | } | ||
607 | |||
589 | /* This is only called to add a reference when the code already holds | 608 | /* This is only called to add a reference when the code already holds |
590 | a valid reference to the rsb, so there's no need for locking. */ | 609 | a valid reference to the rsb, so there's no need for locking. */ |
591 | 610 | ||
@@ -1064,8 +1083,9 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, | |||
1064 | goto out_del; | 1083 | goto out_del; |
1065 | } | 1084 | } |
1066 | 1085 | ||
1067 | log_error(ls, "remwait error %x reply %d flags %x no wait_type", | 1086 | log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait", |
1068 | lkb->lkb_id, mstype, lkb->lkb_flags); | 1087 | lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid, |
1088 | mstype, lkb->lkb_flags); | ||
1069 | return -1; | 1089 | return -1; |
1070 | 1090 | ||
1071 | out_del: | 1091 | out_del: |
@@ -1498,13 +1518,13 @@ static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) | |||
1498 | } | 1518 | } |
1499 | 1519 | ||
1500 | lkb->lkb_rqmode = DLM_LOCK_IV; | 1520 | lkb->lkb_rqmode = DLM_LOCK_IV; |
1521 | lkb->lkb_highbast = 0; | ||
1501 | } | 1522 | } |
1502 | 1523 | ||
1503 | static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) | 1524 | static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) |
1504 | { | 1525 | { |
1505 | set_lvb_lock(r, lkb); | 1526 | set_lvb_lock(r, lkb); |
1506 | _grant_lock(r, lkb); | 1527 | _grant_lock(r, lkb); |
1507 | lkb->lkb_highbast = 0; | ||
1508 | } | 1528 | } |
1509 | 1529 | ||
1510 | static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, | 1530 | static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, |
@@ -1866,7 +1886,8 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now, | |||
1866 | /* Returns the highest requested mode of all blocked conversions; sets | 1886 | /* Returns the highest requested mode of all blocked conversions; sets |
1867 | cw if there's a blocked conversion to DLM_LOCK_CW. */ | 1887 | cw if there's a blocked conversion to DLM_LOCK_CW. */ |
1868 | 1888 | ||
1869 | static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw) | 1889 | static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw, |
1890 | unsigned int *count) | ||
1870 | { | 1891 | { |
1871 | struct dlm_lkb *lkb, *s; | 1892 | struct dlm_lkb *lkb, *s; |
1872 | int hi, demoted, quit, grant_restart, demote_restart; | 1893 | int hi, demoted, quit, grant_restart, demote_restart; |
@@ -1885,6 +1906,8 @@ static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw) | |||
1885 | if (can_be_granted(r, lkb, 0, &deadlk)) { | 1906 | if (can_be_granted(r, lkb, 0, &deadlk)) { |
1886 | grant_lock_pending(r, lkb); | 1907 | grant_lock_pending(r, lkb); |
1887 | grant_restart = 1; | 1908 | grant_restart = 1; |
1909 | if (count) | ||
1910 | (*count)++; | ||
1888 | continue; | 1911 | continue; |
1889 | } | 1912 | } |
1890 | 1913 | ||
@@ -1918,14 +1941,17 @@ static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw) | |||
1918 | return max_t(int, high, hi); | 1941 | return max_t(int, high, hi); |
1919 | } | 1942 | } |
1920 | 1943 | ||
1921 | static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw) | 1944 | static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw, |
1945 | unsigned int *count) | ||
1922 | { | 1946 | { |
1923 | struct dlm_lkb *lkb, *s; | 1947 | struct dlm_lkb *lkb, *s; |
1924 | 1948 | ||
1925 | list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) { | 1949 | list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) { |
1926 | if (can_be_granted(r, lkb, 0, NULL)) | 1950 | if (can_be_granted(r, lkb, 0, NULL)) { |
1927 | grant_lock_pending(r, lkb); | 1951 | grant_lock_pending(r, lkb); |
1928 | else { | 1952 | if (count) |
1953 | (*count)++; | ||
1954 | } else { | ||
1929 | high = max_t(int, lkb->lkb_rqmode, high); | 1955 | high = max_t(int, lkb->lkb_rqmode, high); |
1930 | if (lkb->lkb_rqmode == DLM_LOCK_CW) | 1956 | if (lkb->lkb_rqmode == DLM_LOCK_CW) |
1931 | *cw = 1; | 1957 | *cw = 1; |
@@ -1954,16 +1980,20 @@ static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw) | |||
1954 | return 0; | 1980 | return 0; |
1955 | } | 1981 | } |
1956 | 1982 | ||
1957 | static void grant_pending_locks(struct dlm_rsb *r) | 1983 | static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count) |
1958 | { | 1984 | { |
1959 | struct dlm_lkb *lkb, *s; | 1985 | struct dlm_lkb *lkb, *s; |
1960 | int high = DLM_LOCK_IV; | 1986 | int high = DLM_LOCK_IV; |
1961 | int cw = 0; | 1987 | int cw = 0; |
1962 | 1988 | ||
1963 | DLM_ASSERT(is_master(r), dlm_dump_rsb(r);); | 1989 | if (!is_master(r)) { |
1990 | log_print("grant_pending_locks r nodeid %d", r->res_nodeid); | ||
1991 | dlm_dump_rsb(r); | ||
1992 | return; | ||
1993 | } | ||
1964 | 1994 | ||
1965 | high = grant_pending_convert(r, high, &cw); | 1995 | high = grant_pending_convert(r, high, &cw, count); |
1966 | high = grant_pending_wait(r, high, &cw); | 1996 | high = grant_pending_wait(r, high, &cw, count); |
1967 | 1997 | ||
1968 | if (high == DLM_LOCK_IV) | 1998 | if (high == DLM_LOCK_IV) |
1969 | return; | 1999 | return; |
@@ -2499,7 +2529,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) | |||
2499 | before we try again to grant this one. */ | 2529 | before we try again to grant this one. */ |
2500 | 2530 | ||
2501 | if (is_demoted(lkb)) { | 2531 | if (is_demoted(lkb)) { |
2502 | grant_pending_convert(r, DLM_LOCK_IV, NULL); | 2532 | grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL); |
2503 | if (_can_be_granted(r, lkb, 1)) { | 2533 | if (_can_be_granted(r, lkb, 1)) { |
2504 | grant_lock(r, lkb); | 2534 | grant_lock(r, lkb); |
2505 | queue_cast(r, lkb, 0); | 2535 | queue_cast(r, lkb, 0); |
@@ -2527,7 +2557,7 @@ static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, | |||
2527 | { | 2557 | { |
2528 | switch (error) { | 2558 | switch (error) { |
2529 | case 0: | 2559 | case 0: |
2530 | grant_pending_locks(r); | 2560 | grant_pending_locks(r, NULL); |
2531 | /* grant_pending_locks also sends basts */ | 2561 | /* grant_pending_locks also sends basts */ |
2532 | break; | 2562 | break; |
2533 | case -EAGAIN: | 2563 | case -EAGAIN: |
@@ -2550,7 +2580,7 @@ static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb) | |||
2550 | static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, | 2580 | static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, |
2551 | int error) | 2581 | int error) |
2552 | { | 2582 | { |
2553 | grant_pending_locks(r); | 2583 | grant_pending_locks(r, NULL); |
2554 | } | 2584 | } |
2555 | 2585 | ||
2556 | /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */ | 2586 | /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */ |
@@ -2571,7 +2601,7 @@ static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, | |||
2571 | int error) | 2601 | int error) |
2572 | { | 2602 | { |
2573 | if (error) | 2603 | if (error) |
2574 | grant_pending_locks(r); | 2604 | grant_pending_locks(r, NULL); |
2575 | } | 2605 | } |
2576 | 2606 | ||
2577 | /* | 2607 | /* |
@@ -3372,7 +3402,7 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) | |||
3372 | return error; | 3402 | return error; |
3373 | } | 3403 | } |
3374 | 3404 | ||
3375 | static void receive_request(struct dlm_ls *ls, struct dlm_message *ms) | 3405 | static int receive_request(struct dlm_ls *ls, struct dlm_message *ms) |
3376 | { | 3406 | { |
3377 | struct dlm_lkb *lkb; | 3407 | struct dlm_lkb *lkb; |
3378 | struct dlm_rsb *r; | 3408 | struct dlm_rsb *r; |
@@ -3412,14 +3442,15 @@ static void receive_request(struct dlm_ls *ls, struct dlm_message *ms) | |||
3412 | error = 0; | 3442 | error = 0; |
3413 | if (error) | 3443 | if (error) |
3414 | dlm_put_lkb(lkb); | 3444 | dlm_put_lkb(lkb); |
3415 | return; | 3445 | return 0; |
3416 | 3446 | ||
3417 | fail: | 3447 | fail: |
3418 | setup_stub_lkb(ls, ms); | 3448 | setup_stub_lkb(ls, ms); |
3419 | send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); | 3449 | send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); |
3450 | return error; | ||
3420 | } | 3451 | } |
3421 | 3452 | ||
3422 | static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms) | 3453 | static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms) |
3423 | { | 3454 | { |
3424 | struct dlm_lkb *lkb; | 3455 | struct dlm_lkb *lkb; |
3425 | struct dlm_rsb *r; | 3456 | struct dlm_rsb *r; |
@@ -3429,6 +3460,15 @@ static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms) | |||
3429 | if (error) | 3460 | if (error) |
3430 | goto fail; | 3461 | goto fail; |
3431 | 3462 | ||
3463 | if (lkb->lkb_remid != ms->m_lkid) { | ||
3464 | log_error(ls, "receive_convert %x remid %x recover_seq %llu " | ||
3465 | "remote %d %x", lkb->lkb_id, lkb->lkb_remid, | ||
3466 | (unsigned long long)lkb->lkb_recover_seq, | ||
3467 | ms->m_header.h_nodeid, ms->m_lkid); | ||
3468 | error = -ENOENT; | ||
3469 | goto fail; | ||
3470 | } | ||
3471 | |||
3432 | r = lkb->lkb_resource; | 3472 | r = lkb->lkb_resource; |
3433 | 3473 | ||
3434 | hold_rsb(r); | 3474 | hold_rsb(r); |
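receive_convert() above (and receive_unlock() in the next hunk) now verifies that the message's m_lkid still matches the lkb's stored remid before acting, returning -ENOENT for a stale or mismatched id rather than operating on the wrong lock. A toy sketch of that cross-check with made-up structures:

#include <errno.h>
#include <stdio.h>

/* Toy stand-ins: a local lock record remembers the id the remote side
 * assigned (remid); every incoming message carries the id it was sent for. */
struct lkb_ { unsigned id, remid; };
struct msg_ { unsigned lkid; int nodeid; };

static int receive_convert(struct lkb_ *lkb, const struct msg_ *ms)
{
    if (lkb->remid != ms->lkid) {
        fprintf(stderr, "receive_convert %x remid %x remote %d %x\n",
                lkb->id, lkb->remid, ms->nodeid, ms->lkid);
        return -ENOENT;             /* stale/mismatched id: refuse to act */
    }
    /* ... the real handler would lock the rsb and process the convert ... */
    return 0;
}

int main(void)
{
    struct lkb_ lkb = { .id = 0x10001, .remid = 0x2a };
    struct msg_ ok  = { .lkid = 0x2a, .nodeid = 3 };
    struct msg_ bad = { .lkid = 0x99, .nodeid = 3 };

    printf("%d %d\n", receive_convert(&lkb, &ok), receive_convert(&lkb, &bad));
    return 0;
}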
@@ -3456,14 +3496,15 @@ static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms) | |||
3456 | unlock_rsb(r); | 3496 | unlock_rsb(r); |
3457 | put_rsb(r); | 3497 | put_rsb(r); |
3458 | dlm_put_lkb(lkb); | 3498 | dlm_put_lkb(lkb); |
3459 | return; | 3499 | return 0; |
3460 | 3500 | ||
3461 | fail: | 3501 | fail: |
3462 | setup_stub_lkb(ls, ms); | 3502 | setup_stub_lkb(ls, ms); |
3463 | send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); | 3503 | send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); |
3504 | return error; | ||
3464 | } | 3505 | } |
3465 | 3506 | ||
3466 | static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) | 3507 | static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) |
3467 | { | 3508 | { |
3468 | struct dlm_lkb *lkb; | 3509 | struct dlm_lkb *lkb; |
3469 | struct dlm_rsb *r; | 3510 | struct dlm_rsb *r; |
@@ -3473,6 +3514,14 @@ static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) | |||
3473 | if (error) | 3514 | if (error) |
3474 | goto fail; | 3515 | goto fail; |
3475 | 3516 | ||
3517 | if (lkb->lkb_remid != ms->m_lkid) { | ||
3518 | log_error(ls, "receive_unlock %x remid %x remote %d %x", | ||
3519 | lkb->lkb_id, lkb->lkb_remid, | ||
3520 | ms->m_header.h_nodeid, ms->m_lkid); | ||
3521 | error = -ENOENT; | ||
3522 | goto fail; | ||
3523 | } | ||
3524 | |||
3476 | r = lkb->lkb_resource; | 3525 | r = lkb->lkb_resource; |
3477 | 3526 | ||
3478 | hold_rsb(r); | 3527 | hold_rsb(r); |
@@ -3497,14 +3546,15 @@ static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) | |||
3497 | unlock_rsb(r); | 3546 | unlock_rsb(r); |
3498 | put_rsb(r); | 3547 | put_rsb(r); |
3499 | dlm_put_lkb(lkb); | 3548 | dlm_put_lkb(lkb); |
3500 | return; | 3549 | return 0; |
3501 | 3550 | ||
3502 | fail: | 3551 | fail: |
3503 | setup_stub_lkb(ls, ms); | 3552 | setup_stub_lkb(ls, ms); |
3504 | send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); | 3553 | send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); |
3554 | return error; | ||
3505 | } | 3555 | } |
3506 | 3556 | ||
3507 | static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) | 3557 | static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) |
3508 | { | 3558 | { |
3509 | struct dlm_lkb *lkb; | 3559 | struct dlm_lkb *lkb; |
3510 | struct dlm_rsb *r; | 3560 | struct dlm_rsb *r; |
@@ -3532,25 +3582,23 @@ static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) | |||
3532 | unlock_rsb(r); | 3582 | unlock_rsb(r); |
3533 | put_rsb(r); | 3583 | put_rsb(r); |
3534 | dlm_put_lkb(lkb); | 3584 | dlm_put_lkb(lkb); |
3535 | return; | 3585 | return 0; |
3536 | 3586 | ||
3537 | fail: | 3587 | fail: |
3538 | setup_stub_lkb(ls, ms); | 3588 | setup_stub_lkb(ls, ms); |
3539 | send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); | 3589 | send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); |
3590 | return error; | ||
3540 | } | 3591 | } |
3541 | 3592 | ||
3542 | static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms) | 3593 | static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms) |
3543 | { | 3594 | { |
3544 | struct dlm_lkb *lkb; | 3595 | struct dlm_lkb *lkb; |
3545 | struct dlm_rsb *r; | 3596 | struct dlm_rsb *r; |
3546 | int error; | 3597 | int error; |
3547 | 3598 | ||
3548 | error = find_lkb(ls, ms->m_remid, &lkb); | 3599 | error = find_lkb(ls, ms->m_remid, &lkb); |
3549 | if (error) { | 3600 | if (error) |
3550 | log_debug(ls, "receive_grant from %d no lkb %x", | 3601 | return error; |
3551 | ms->m_header.h_nodeid, ms->m_remid); | ||
3552 | return; | ||
3553 | } | ||
3554 | 3602 | ||
3555 | r = lkb->lkb_resource; | 3603 | r = lkb->lkb_resource; |
3556 | 3604 | ||
@@ -3570,20 +3618,18 @@ static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms) | |||
3570 | unlock_rsb(r); | 3618 | unlock_rsb(r); |
3571 | put_rsb(r); | 3619 | put_rsb(r); |
3572 | dlm_put_lkb(lkb); | 3620 | dlm_put_lkb(lkb); |
3621 | return 0; | ||
3573 | } | 3622 | } |
3574 | 3623 | ||
3575 | static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms) | 3624 | static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms) |
3576 | { | 3625 | { |
3577 | struct dlm_lkb *lkb; | 3626 | struct dlm_lkb *lkb; |
3578 | struct dlm_rsb *r; | 3627 | struct dlm_rsb *r; |
3579 | int error; | 3628 | int error; |
3580 | 3629 | ||
3581 | error = find_lkb(ls, ms->m_remid, &lkb); | 3630 | error = find_lkb(ls, ms->m_remid, &lkb); |
3582 | if (error) { | 3631 | if (error) |
3583 | log_debug(ls, "receive_bast from %d no lkb %x", | 3632 | return error; |
3584 | ms->m_header.h_nodeid, ms->m_remid); | ||
3585 | return; | ||
3586 | } | ||
3587 | 3633 | ||
3588 | r = lkb->lkb_resource; | 3634 | r = lkb->lkb_resource; |
3589 | 3635 | ||
@@ -3595,10 +3641,12 @@ static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms) | |||
3595 | goto out; | 3641 | goto out; |
3596 | 3642 | ||
3597 | queue_bast(r, lkb, ms->m_bastmode); | 3643 | queue_bast(r, lkb, ms->m_bastmode); |
3644 | lkb->lkb_highbast = ms->m_bastmode; | ||
3598 | out: | 3645 | out: |
3599 | unlock_rsb(r); | 3646 | unlock_rsb(r); |
3600 | put_rsb(r); | 3647 | put_rsb(r); |
3601 | dlm_put_lkb(lkb); | 3648 | dlm_put_lkb(lkb); |
3649 | return 0; | ||
3602 | } | 3650 | } |
3603 | 3651 | ||
3604 | static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms) | 3652 | static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms) |
@@ -3653,18 +3701,15 @@ static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms) | |||
3653 | do_purge(ls, ms->m_nodeid, ms->m_pid); | 3701 | do_purge(ls, ms->m_nodeid, ms->m_pid); |
3654 | } | 3702 | } |
3655 | 3703 | ||
3656 | static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) | 3704 | static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) |
3657 | { | 3705 | { |
3658 | struct dlm_lkb *lkb; | 3706 | struct dlm_lkb *lkb; |
3659 | struct dlm_rsb *r; | 3707 | struct dlm_rsb *r; |
3660 | int error, mstype, result; | 3708 | int error, mstype, result; |
3661 | 3709 | ||
3662 | error = find_lkb(ls, ms->m_remid, &lkb); | 3710 | error = find_lkb(ls, ms->m_remid, &lkb); |
3663 | if (error) { | 3711 | if (error) |
3664 | log_debug(ls, "receive_request_reply from %d no lkb %x", | 3712 | return error; |
3665 | ms->m_header.h_nodeid, ms->m_remid); | ||
3666 | return; | ||
3667 | } | ||
3668 | 3713 | ||
3669 | r = lkb->lkb_resource; | 3714 | r = lkb->lkb_resource; |
3670 | hold_rsb(r); | 3715 | hold_rsb(r); |
@@ -3676,8 +3721,13 @@ static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) | |||
3676 | 3721 | ||
3677 | mstype = lkb->lkb_wait_type; | 3722 | mstype = lkb->lkb_wait_type; |
3678 | error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY); | 3723 | error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY); |
3679 | if (error) | 3724 | if (error) { |
3725 | log_error(ls, "receive_request_reply %x remote %d %x result %d", | ||
3726 | lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid, | ||
3727 | ms->m_result); | ||
3728 | dlm_dump_rsb(r); | ||
3680 | goto out; | 3729 | goto out; |
3730 | } | ||
3681 | 3731 | ||
3682 | /* Optimization: the dir node was also the master, so it took our | 3732 | /* Optimization: the dir node was also the master, so it took our |
3683 | lookup as a request and sent request reply instead of lookup reply */ | 3733 | lookup as a request and sent request reply instead of lookup reply */ |
@@ -3755,6 +3805,7 @@ static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) | |||
3755 | unlock_rsb(r); | 3805 | unlock_rsb(r); |
3756 | put_rsb(r); | 3806 | put_rsb(r); |
3757 | dlm_put_lkb(lkb); | 3807 | dlm_put_lkb(lkb); |
3808 | return 0; | ||
3758 | } | 3809 | } |
3759 | 3810 | ||
3760 | static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, | 3811 | static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, |
@@ -3793,8 +3844,11 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, | |||
3793 | break; | 3844 | break; |
3794 | 3845 | ||
3795 | default: | 3846 | default: |
3796 | log_error(r->res_ls, "receive_convert_reply %x error %d", | 3847 | log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d", |
3797 | lkb->lkb_id, ms->m_result); | 3848 | lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid, |
3849 | ms->m_result); | ||
3850 | dlm_print_rsb(r); | ||
3851 | dlm_print_lkb(lkb); | ||
3798 | } | 3852 | } |
3799 | } | 3853 | } |
3800 | 3854 | ||
@@ -3821,20 +3875,18 @@ static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms) | |||
3821 | put_rsb(r); | 3875 | put_rsb(r); |
3822 | } | 3876 | } |
3823 | 3877 | ||
3824 | static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms) | 3878 | static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms) |
3825 | { | 3879 | { |
3826 | struct dlm_lkb *lkb; | 3880 | struct dlm_lkb *lkb; |
3827 | int error; | 3881 | int error; |
3828 | 3882 | ||
3829 | error = find_lkb(ls, ms->m_remid, &lkb); | 3883 | error = find_lkb(ls, ms->m_remid, &lkb); |
3830 | if (error) { | 3884 | if (error) |
3831 | log_debug(ls, "receive_convert_reply from %d no lkb %x", | 3885 | return error; |
3832 | ms->m_header.h_nodeid, ms->m_remid); | ||
3833 | return; | ||
3834 | } | ||
3835 | 3886 | ||
3836 | _receive_convert_reply(lkb, ms); | 3887 | _receive_convert_reply(lkb, ms); |
3837 | dlm_put_lkb(lkb); | 3888 | dlm_put_lkb(lkb); |
3889 | return 0; | ||
3838 | } | 3890 | } |
3839 | 3891 | ||
3840 | static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms) | 3892 | static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms) |
@@ -3873,20 +3925,18 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms) | |||
3873 | put_rsb(r); | 3925 | put_rsb(r); |
3874 | } | 3926 | } |
3875 | 3927 | ||
3876 | static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms) | 3928 | static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms) |
3877 | { | 3929 | { |
3878 | struct dlm_lkb *lkb; | 3930 | struct dlm_lkb *lkb; |
3879 | int error; | 3931 | int error; |
3880 | 3932 | ||
3881 | error = find_lkb(ls, ms->m_remid, &lkb); | 3933 | error = find_lkb(ls, ms->m_remid, &lkb); |
3882 | if (error) { | 3934 | if (error) |
3883 | log_debug(ls, "receive_unlock_reply from %d no lkb %x", | 3935 | return error; |
3884 | ms->m_header.h_nodeid, ms->m_remid); | ||
3885 | return; | ||
3886 | } | ||
3887 | 3936 | ||
3888 | _receive_unlock_reply(lkb, ms); | 3937 | _receive_unlock_reply(lkb, ms); |
3889 | dlm_put_lkb(lkb); | 3938 | dlm_put_lkb(lkb); |
3939 | return 0; | ||
3890 | } | 3940 | } |
3891 | 3941 | ||
3892 | static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms) | 3942 | static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms) |
@@ -3925,20 +3975,18 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms) | |||
3925 | put_rsb(r); | 3975 | put_rsb(r); |
3926 | } | 3976 | } |
3927 | 3977 | ||
3928 | static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms) | 3978 | static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms) |
3929 | { | 3979 | { |
3930 | struct dlm_lkb *lkb; | 3980 | struct dlm_lkb *lkb; |
3931 | int error; | 3981 | int error; |
3932 | 3982 | ||
3933 | error = find_lkb(ls, ms->m_remid, &lkb); | 3983 | error = find_lkb(ls, ms->m_remid, &lkb); |
3934 | if (error) { | 3984 | if (error) |
3935 | log_debug(ls, "receive_cancel_reply from %d no lkb %x", | 3985 | return error; |
3936 | ms->m_header.h_nodeid, ms->m_remid); | ||
3937 | return; | ||
3938 | } | ||
3939 | 3986 | ||
3940 | _receive_cancel_reply(lkb, ms); | 3987 | _receive_cancel_reply(lkb, ms); |
3941 | dlm_put_lkb(lkb); | 3988 | dlm_put_lkb(lkb); |
3989 | return 0; | ||
3942 | } | 3990 | } |
3943 | 3991 | ||
3944 | static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) | 3992 | static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) |
@@ -3949,7 +3997,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) | |||
3949 | 3997 | ||
3950 | error = find_lkb(ls, ms->m_lkid, &lkb); | 3998 | error = find_lkb(ls, ms->m_lkid, &lkb); |
3951 | if (error) { | 3999 | if (error) { |
3952 | log_error(ls, "receive_lookup_reply no lkb"); | 4000 | log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid); |
3953 | return; | 4001 | return; |
3954 | } | 4002 | } |
3955 | 4003 | ||
@@ -3993,8 +4041,11 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) | |||
3993 | dlm_put_lkb(lkb); | 4041 | dlm_put_lkb(lkb); |
3994 | } | 4042 | } |
3995 | 4043 | ||
3996 | static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms) | 4044 | static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms, |
4045 | uint32_t saved_seq) | ||
3997 | { | 4046 | { |
4047 | int error = 0, noent = 0; | ||
4048 | |||
3998 | if (!dlm_is_member(ls, ms->m_header.h_nodeid)) { | 4049 | if (!dlm_is_member(ls, ms->m_header.h_nodeid)) { |
3999 | log_debug(ls, "ignore non-member message %d from %d %x %x %d", | 4050 | log_debug(ls, "ignore non-member message %d from %d %x %x %d", |
4000 | ms->m_type, ms->m_header.h_nodeid, ms->m_lkid, | 4051 | ms->m_type, ms->m_header.h_nodeid, ms->m_lkid, |
@@ -4007,47 +4058,50 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms) | |||
4007 | /* messages sent to a master node */ | 4058 | /* messages sent to a master node */ |
4008 | 4059 | ||
4009 | case DLM_MSG_REQUEST: | 4060 | case DLM_MSG_REQUEST: |
4010 | receive_request(ls, ms); | 4061 | error = receive_request(ls, ms); |
4011 | break; | 4062 | break; |
4012 | 4063 | ||
4013 | case DLM_MSG_CONVERT: | 4064 | case DLM_MSG_CONVERT: |
4014 | receive_convert(ls, ms); | 4065 | error = receive_convert(ls, ms); |
4015 | break; | 4066 | break; |
4016 | 4067 | ||
4017 | case DLM_MSG_UNLOCK: | 4068 | case DLM_MSG_UNLOCK: |
4018 | receive_unlock(ls, ms); | 4069 | error = receive_unlock(ls, ms); |
4019 | break; | 4070 | break; |
4020 | 4071 | ||
4021 | case DLM_MSG_CANCEL: | 4072 | case DLM_MSG_CANCEL: |
4022 | receive_cancel(ls, ms); | 4073 | noent = 1; |
4074 | error = receive_cancel(ls, ms); | ||
4023 | break; | 4075 | break; |
4024 | 4076 | ||
4025 | /* messages sent from a master node (replies to above) */ | 4077 | /* messages sent from a master node (replies to above) */ |
4026 | 4078 | ||
4027 | case DLM_MSG_REQUEST_REPLY: | 4079 | case DLM_MSG_REQUEST_REPLY: |
4028 | receive_request_reply(ls, ms); | 4080 | error = receive_request_reply(ls, ms); |
4029 | break; | 4081 | break; |
4030 | 4082 | ||
4031 | case DLM_MSG_CONVERT_REPLY: | 4083 | case DLM_MSG_CONVERT_REPLY: |
4032 | receive_convert_reply(ls, ms); | 4084 | error = receive_convert_reply(ls, ms); |
4033 | break; | 4085 | break; |
4034 | 4086 | ||
4035 | case DLM_MSG_UNLOCK_REPLY: | 4087 | case DLM_MSG_UNLOCK_REPLY: |
4036 | receive_unlock_reply(ls, ms); | 4088 | error = receive_unlock_reply(ls, ms); |
4037 | break; | 4089 | break; |
4038 | 4090 | ||
4039 | case DLM_MSG_CANCEL_REPLY: | 4091 | case DLM_MSG_CANCEL_REPLY: |
4040 | receive_cancel_reply(ls, ms); | 4092 | error = receive_cancel_reply(ls, ms); |
4041 | break; | 4093 | break; |
4042 | 4094 | ||
4043 | /* messages sent from a master node (only two types of async msg) */ | 4095 | /* messages sent from a master node (only two types of async msg) */ |
4044 | 4096 | ||
4045 | case DLM_MSG_GRANT: | 4097 | case DLM_MSG_GRANT: |
4046 | receive_grant(ls, ms); | 4098 | noent = 1; |
4099 | error = receive_grant(ls, ms); | ||
4047 | break; | 4100 | break; |
4048 | 4101 | ||
4049 | case DLM_MSG_BAST: | 4102 | case DLM_MSG_BAST: |
4050 | receive_bast(ls, ms); | 4103 | noent = 1; |
4104 | error = receive_bast(ls, ms); | ||
4051 | break; | 4105 | break; |
4052 | 4106 | ||
4053 | /* messages sent to a dir node */ | 4107 | /* messages sent to a dir node */ |
@@ -4075,6 +4129,37 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms) | |||
4075 | default: | 4129 | default: |
4076 | log_error(ls, "unknown message type %d", ms->m_type); | 4130 | log_error(ls, "unknown message type %d", ms->m_type); |
4077 | } | 4131 | } |
4132 | |||
4133 | /* | ||
4134 | * When checking for ENOENT, we're checking the result of | ||
4135 | * find_lkb(m_remid): | ||
4136 | * | ||
4137 | * The lock id referenced in the message wasn't found. This may | ||
4138 | * happen in normal usage for the async messages and cancel, so | ||
4139 | * only use log_debug for them. | ||
4140 | * | ||
4141 | * Some errors are expected and normal. | ||
4142 | */ | ||
4143 | |||
4144 | if (error == -ENOENT && noent) { | ||
4145 | log_debug(ls, "receive %d no %x remote %d %x saved_seq %u", | ||
4146 | ms->m_type, ms->m_remid, ms->m_header.h_nodeid, | ||
4147 | ms->m_lkid, saved_seq); | ||
4148 | } else if (error == -ENOENT) { | ||
4149 | log_error(ls, "receive %d no %x remote %d %x saved_seq %u", | ||
4150 | ms->m_type, ms->m_remid, ms->m_header.h_nodeid, | ||
4151 | ms->m_lkid, saved_seq); | ||
4152 | |||
4153 | if (ms->m_type == DLM_MSG_CONVERT) | ||
4154 | dlm_dump_rsb_hash(ls, ms->m_hash); | ||
4155 | } | ||
4156 | |||
4157 | if (error == -EINVAL) { | ||
4158 | log_error(ls, "receive %d inval from %d lkid %x remid %x " | ||
4159 | "saved_seq %u", | ||
4160 | ms->m_type, ms->m_header.h_nodeid, | ||
4161 | ms->m_lkid, ms->m_remid, saved_seq); | ||
4162 | } | ||
4078 | } | 4163 | } |
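
The net effect of the hunk above is that every receive_* handler now reports its result back to _receive_message(), which classifies it in one place: -ENOENT is logged at debug level only for the message types where a missing lkb is normal (cancel, grant, bast), at error level otherwise, and -EINVAL is always an error. A small stand-alone sketch of that classification follows; dispatch(), the enum and the printf-based logging are invented stand-ins, not kernel code.

#include <errno.h>
#include <stdio.h>

enum msg_type { MSG_REQUEST, MSG_CANCEL, MSG_GRANT, MSG_BAST };

/* stand-in for the per-type receive_* handlers; pretend the lkb lookup failed */
static int dispatch(enum msg_type type)
{
    (void)type;
    return -ENOENT;
}

static void receive_message(enum msg_type type, unsigned int remid,
                            unsigned int saved_seq)
{
    /* for cancel, grant and bast a missing lkb can happen in normal use */
    int noent = (type == MSG_CANCEL || type == MSG_GRANT || type == MSG_BAST);
    int error = dispatch(type);

    if (error == -ENOENT && noent)
        printf("debug: receive %d no %x saved_seq %u\n", type, remid, saved_seq);
    else if (error == -ENOENT)
        printf("error: receive %d no %x saved_seq %u\n", type, remid, saved_seq);
    else if (error == -EINVAL)
        printf("error: receive %d inval saved_seq %u\n", type, saved_seq);
}

int main(void)
{
    receive_message(MSG_GRANT, 0x1234, 7);   /* expected case: debug only */
    receive_message(MSG_REQUEST, 0x1234, 7); /* unexpected: error level */
    return 0;
}
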
4079 | 4164 | ||
4080 | /* If the lockspace is in recovery mode (locking stopped), then normal | 4165 | /* If the lockspace is in recovery mode (locking stopped), then normal |
@@ -4092,16 +4177,17 @@ static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms, | |||
4092 | dlm_add_requestqueue(ls, nodeid, ms); | 4177 | dlm_add_requestqueue(ls, nodeid, ms); |
4093 | } else { | 4178 | } else { |
4094 | dlm_wait_requestqueue(ls); | 4179 | dlm_wait_requestqueue(ls); |
4095 | _receive_message(ls, ms); | 4180 | _receive_message(ls, ms, 0); |
4096 | } | 4181 | } |
4097 | } | 4182 | } |
4098 | 4183 | ||
4099 | /* This is called by dlm_recoverd to process messages that were saved on | 4184 | /* This is called by dlm_recoverd to process messages that were saved on |
4100 | the requestqueue. */ | 4185 | the requestqueue. */ |
4101 | 4186 | ||
4102 | void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms) | 4187 | void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, |
4188 | uint32_t saved_seq) | ||
4103 | { | 4189 | { |
4104 | _receive_message(ls, ms); | 4190 | _receive_message(ls, ms, saved_seq); |
4105 | } | 4191 | } |
4106 | 4192 | ||
4107 | /* This is called by the midcomms layer when something is received for | 4193 | /* This is called by the midcomms layer when something is received for |
@@ -4137,9 +4223,11 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid) | |||
4137 | 4223 | ||
4138 | ls = dlm_find_lockspace_global(hd->h_lockspace); | 4224 | ls = dlm_find_lockspace_global(hd->h_lockspace); |
4139 | if (!ls) { | 4225 | if (!ls) { |
4140 | if (dlm_config.ci_log_debug) | 4226 | if (dlm_config.ci_log_debug) { |
4141 | log_print("invalid lockspace %x from %d cmd %d type %d", | 4227 | printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace " |
4142 | hd->h_lockspace, nodeid, hd->h_cmd, type); | 4228 | "%u from %d cmd %d type %d\n", |
4229 | hd->h_lockspace, nodeid, hd->h_cmd, type); | ||
4230 | } | ||
4143 | 4231 | ||
4144 | if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) | 4232 | if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) |
4145 | dlm_send_ls_not_ready(nodeid, &p->rcom); | 4233 | dlm_send_ls_not_ready(nodeid, &p->rcom); |
@@ -4187,15 +4275,13 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb, | |||
4187 | /* A waiting lkb needs recovery if the master node has failed, or | 4275 | /* A waiting lkb needs recovery if the master node has failed, or |
4188 | the master node is changing (only when no directory is used) */ | 4276 | the master node is changing (only when no directory is used) */ |
4189 | 4277 | ||
4190 | static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb) | 4278 | static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb, |
4279 | int dir_nodeid) | ||
4191 | { | 4280 | { |
4192 | if (dlm_is_removed(ls, lkb->lkb_nodeid)) | 4281 | if (dlm_no_directory(ls)) |
4193 | return 1; | 4282 | return 1; |
4194 | 4283 | ||
4195 | if (!dlm_no_directory(ls)) | 4284 | if (dlm_is_removed(ls, lkb->lkb_wait_nodeid)) |
4196 | return 0; | ||
4197 | |||
4198 | if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid) | ||
4199 | return 1; | 4285 | return 1; |
4200 | 4286 | ||
4201 | return 0; | 4287 | return 0; |
@@ -4212,6 +4298,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) | |||
4212 | struct dlm_lkb *lkb, *safe; | 4298 | struct dlm_lkb *lkb, *safe; |
4213 | struct dlm_message *ms_stub; | 4299 | struct dlm_message *ms_stub; |
4214 | int wait_type, stub_unlock_result, stub_cancel_result; | 4300 | int wait_type, stub_unlock_result, stub_cancel_result; |
4301 | int dir_nodeid; | ||
4215 | 4302 | ||
4216 | ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL); | 4303 | ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL); |
4217 | if (!ms_stub) { | 4304 | if (!ms_stub) { |
@@ -4223,13 +4310,21 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) | |||
4223 | 4310 | ||
4224 | list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { | 4311 | list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { |
4225 | 4312 | ||
4313 | dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource); | ||
4314 | |||
4226 | /* exclude debug messages about unlocks because there can be so | 4315 | /* exclude debug messages about unlocks because there can be so |
4227 | many and they aren't very interesting */ | 4316 | many and they aren't very interesting */ |
4228 | 4317 | ||
4229 | if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) { | 4318 | if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) { |
4230 | log_debug(ls, "recover_waiter %x nodeid %d " | 4319 | log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " |
4231 | "msg %d to %d", lkb->lkb_id, lkb->lkb_nodeid, | 4320 | "lkb_nodeid %d wait_nodeid %d dir_nodeid %d", |
4232 | lkb->lkb_wait_type, lkb->lkb_wait_nodeid); | 4321 | lkb->lkb_id, |
4322 | lkb->lkb_remid, | ||
4323 | lkb->lkb_wait_type, | ||
4324 | lkb->lkb_resource->res_nodeid, | ||
4325 | lkb->lkb_nodeid, | ||
4326 | lkb->lkb_wait_nodeid, | ||
4327 | dir_nodeid); | ||
4233 | } | 4328 | } |
4234 | 4329 | ||
4235 | /* all outstanding lookups, regardless of destination will be | 4330 | /* all outstanding lookups, regardless of destination will be |
@@ -4240,7 +4335,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) | |||
4240 | continue; | 4335 | continue; |
4241 | } | 4336 | } |
4242 | 4337 | ||
4243 | if (!waiter_needs_recovery(ls, lkb)) | 4338 | if (!waiter_needs_recovery(ls, lkb, dir_nodeid)) |
4244 | continue; | 4339 | continue; |
4245 | 4340 | ||
4246 | wait_type = lkb->lkb_wait_type; | 4341 | wait_type = lkb->lkb_wait_type; |
@@ -4373,8 +4468,11 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) | |||
4373 | ou = is_overlap_unlock(lkb); | 4468 | ou = is_overlap_unlock(lkb); |
4374 | err = 0; | 4469 | err = 0; |
4375 | 4470 | ||
4376 | log_debug(ls, "recover_waiter %x nodeid %d msg %d r_nodeid %d", | 4471 | log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " |
4377 | lkb->lkb_id, lkb->lkb_nodeid, mstype, r->res_nodeid); | 4472 | "lkb_nodeid %d wait_nodeid %d dir_nodeid %d " |
4473 | "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype, | ||
4474 | r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid, | ||
4475 | dlm_dir_nodeid(r), oc, ou); | ||
4378 | 4476 | ||
4379 | /* At this point we assume that we won't get a reply to any | 4477 | /* At this point we assume that we won't get a reply to any |
4380 | previous op or overlap op on this lock. First, do a big | 4478 | previous op or overlap op on this lock. First, do a big |
@@ -4426,9 +4524,12 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) | |||
4426 | } | 4524 | } |
4427 | } | 4525 | } |
4428 | 4526 | ||
4429 | if (err) | 4527 | if (err) { |
4430 | log_error(ls, "recover_waiters_post %x %d %x %d %d", | 4528 | log_error(ls, "waiter %x msg %d r_nodeid %d " |
4431 | lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou); | 4529 | "dir_nodeid %d overlap %d %d", |
4530 | lkb->lkb_id, mstype, r->res_nodeid, | ||
4531 | dlm_dir_nodeid(r), oc, ou); | ||
4532 | } | ||
4432 | unlock_rsb(r); | 4533 | unlock_rsb(r); |
4433 | put_rsb(r); | 4534 | put_rsb(r); |
4434 | dlm_put_lkb(lkb); | 4535 | dlm_put_lkb(lkb); |
@@ -4437,112 +4538,177 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) | |||
4437 | return error; | 4538 | return error; |
4438 | } | 4539 | } |
4439 | 4540 | ||
4440 | static void purge_queue(struct dlm_rsb *r, struct list_head *queue, | 4541 | static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r, |
4441 | int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb)) | 4542 | struct list_head *list) |
4442 | { | 4543 | { |
4443 | struct dlm_ls *ls = r->res_ls; | ||
4444 | struct dlm_lkb *lkb, *safe; | 4544 | struct dlm_lkb *lkb, *safe; |
4445 | 4545 | ||
4446 | list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) { | 4546 | list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) { |
4447 | if (test(ls, lkb)) { | 4547 | if (!is_master_copy(lkb)) |
4448 | rsb_set_flag(r, RSB_LOCKS_PURGED); | 4548 | continue; |
4449 | del_lkb(r, lkb); | 4549 | |
4450 | /* this put should free the lkb */ | 4550 | /* don't purge lkbs we've added in recover_master_copy for |
4451 | if (!dlm_put_lkb(lkb)) | 4551 | the current recovery seq */ |
4452 | log_error(ls, "purged lkb not released"); | 4552 | |
4453 | } | 4553 | if (lkb->lkb_recover_seq == ls->ls_recover_seq) |
4554 | continue; | ||
4555 | |||
4556 | del_lkb(r, lkb); | ||
4557 | |||
4558 | /* this put should free the lkb */ | ||
4559 | if (!dlm_put_lkb(lkb)) | ||
4560 | log_error(ls, "purged mstcpy lkb not released"); | ||
4454 | } | 4561 | } |
4455 | } | 4562 | } |
4456 | 4563 | ||
4457 | static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb) | 4564 | void dlm_purge_mstcpy_locks(struct dlm_rsb *r) |
4458 | { | 4565 | { |
4459 | return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid)); | 4566 | struct dlm_ls *ls = r->res_ls; |
4460 | } | ||
4461 | 4567 | ||
4462 | static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb) | 4568 | purge_mstcpy_list(ls, r, &r->res_grantqueue); |
4463 | { | 4569 | purge_mstcpy_list(ls, r, &r->res_convertqueue); |
4464 | return is_master_copy(lkb); | 4570 | purge_mstcpy_list(ls, r, &r->res_waitqueue); |
4465 | } | 4571 | } |
4466 | 4572 | ||
4467 | static void purge_dead_locks(struct dlm_rsb *r) | 4573 | static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r, |
4574 | struct list_head *list, | ||
4575 | int nodeid_gone, unsigned int *count) | ||
4468 | { | 4576 | { |
4469 | purge_queue(r, &r->res_grantqueue, &purge_dead_test); | 4577 | struct dlm_lkb *lkb, *safe; |
4470 | purge_queue(r, &r->res_convertqueue, &purge_dead_test); | ||
4471 | purge_queue(r, &r->res_waitqueue, &purge_dead_test); | ||
4472 | } | ||
4473 | 4578 | ||
4474 | void dlm_purge_mstcpy_locks(struct dlm_rsb *r) | 4579 | list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) { |
4475 | { | 4580 | if (!is_master_copy(lkb)) |
4476 | purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test); | 4581 | continue; |
4477 | purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test); | 4582 | |
4478 | purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test); | 4583 | if ((lkb->lkb_nodeid == nodeid_gone) || |
4584 | dlm_is_removed(ls, lkb->lkb_nodeid)) { | ||
4585 | |||
4586 | del_lkb(r, lkb); | ||
4587 | |||
4588 | /* this put should free the lkb */ | ||
4589 | if (!dlm_put_lkb(lkb)) | ||
4590 | log_error(ls, "purged dead lkb not released"); | ||
4591 | |||
4592 | rsb_set_flag(r, RSB_RECOVER_GRANT); | ||
4593 | |||
4594 | (*count)++; | ||
4595 | } | ||
4596 | } | ||
4479 | } | 4597 | } |
4480 | 4598 | ||
4481 | /* Get rid of locks held by nodes that are gone. */ | 4599 | /* Get rid of locks held by nodes that are gone. */ |
4482 | 4600 | ||
4483 | int dlm_purge_locks(struct dlm_ls *ls) | 4601 | void dlm_recover_purge(struct dlm_ls *ls) |
4484 | { | 4602 | { |
4485 | struct dlm_rsb *r; | 4603 | struct dlm_rsb *r; |
4604 | struct dlm_member *memb; | ||
4605 | int nodes_count = 0; | ||
4606 | int nodeid_gone = 0; | ||
4607 | unsigned int lkb_count = 0; | ||
4486 | 4608 | ||
4487 | log_debug(ls, "dlm_purge_locks"); | 4609 | /* cache one removed nodeid to optimize the common |
4610 | case of a single node removed */ | ||
4611 | |||
4612 | list_for_each_entry(memb, &ls->ls_nodes_gone, list) { | ||
4613 | nodes_count++; | ||
4614 | nodeid_gone = memb->nodeid; | ||
4615 | } | ||
4616 | |||
4617 | if (!nodes_count) | ||
4618 | return; | ||
4488 | 4619 | ||
4489 | down_write(&ls->ls_root_sem); | 4620 | down_write(&ls->ls_root_sem); |
4490 | list_for_each_entry(r, &ls->ls_root_list, res_root_list) { | 4621 | list_for_each_entry(r, &ls->ls_root_list, res_root_list) { |
4491 | hold_rsb(r); | 4622 | hold_rsb(r); |
4492 | lock_rsb(r); | 4623 | lock_rsb(r); |
4493 | if (is_master(r)) | 4624 | if (is_master(r)) { |
4494 | purge_dead_locks(r); | 4625 | purge_dead_list(ls, r, &r->res_grantqueue, |
4626 | nodeid_gone, &lkb_count); | ||
4627 | purge_dead_list(ls, r, &r->res_convertqueue, | ||
4628 | nodeid_gone, &lkb_count); | ||
4629 | purge_dead_list(ls, r, &r->res_waitqueue, | ||
4630 | nodeid_gone, &lkb_count); | ||
4631 | } | ||
4495 | unlock_rsb(r); | 4632 | unlock_rsb(r); |
4496 | unhold_rsb(r); | 4633 | unhold_rsb(r); |
4497 | 4634 | cond_resched(); | |
4498 | schedule(); | ||
4499 | } | 4635 | } |
4500 | up_write(&ls->ls_root_sem); | 4636 | up_write(&ls->ls_root_sem); |
4501 | 4637 | ||
4502 | return 0; | 4638 | if (lkb_count) |
4639 | log_debug(ls, "dlm_recover_purge %u locks for %u nodes", | ||
4640 | lkb_count, nodes_count); | ||
4503 | } | 4641 | } |
4504 | 4642 | ||
4505 | static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket) | 4643 | static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) |
4506 | { | 4644 | { |
4507 | struct rb_node *n; | 4645 | struct rb_node *n; |
4508 | struct dlm_rsb *r, *r_ret = NULL; | 4646 | struct dlm_rsb *r; |
4509 | 4647 | ||
4510 | spin_lock(&ls->ls_rsbtbl[bucket].lock); | 4648 | spin_lock(&ls->ls_rsbtbl[bucket].lock); |
4511 | for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { | 4649 | for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { |
4512 | r = rb_entry(n, struct dlm_rsb, res_hashnode); | 4650 | r = rb_entry(n, struct dlm_rsb, res_hashnode); |
4513 | if (!rsb_flag(r, RSB_LOCKS_PURGED)) | 4651 | |
4652 | if (!rsb_flag(r, RSB_RECOVER_GRANT)) | ||
4653 | continue; | ||
4654 | rsb_clear_flag(r, RSB_RECOVER_GRANT); | ||
4655 | if (!is_master(r)) | ||
4514 | continue; | 4656 | continue; |
4515 | hold_rsb(r); | 4657 | hold_rsb(r); |
4516 | rsb_clear_flag(r, RSB_LOCKS_PURGED); | 4658 | spin_unlock(&ls->ls_rsbtbl[bucket].lock); |
4517 | r_ret = r; | 4659 | return r; |
4518 | break; | ||
4519 | } | 4660 | } |
4520 | spin_unlock(&ls->ls_rsbtbl[bucket].lock); | 4661 | spin_unlock(&ls->ls_rsbtbl[bucket].lock); |
4521 | return r_ret; | 4662 | return NULL; |
4522 | } | 4663 | } |
4523 | 4664 | ||
4524 | void dlm_grant_after_purge(struct dlm_ls *ls) | 4665 | /* |
4666 | * Attempt to grant locks on resources that we are the master of. | ||
4667 | * Locks may have become grantable during recovery because locks | ||
4668 | * from departed nodes have been purged (or not rebuilt), allowing | ||
4669 | * previously blocked locks to now be granted. The subset of rsb's | ||
4670 | * we are interested in are those with lkb's on either the convert or | ||
4671 | * waiting queues. | ||
4672 | * | ||
4673 | * Simplest would be to go through each master rsb and check for non-empty | ||
4674 | * convert or waiting queues, and attempt to grant on those rsbs. | ||
4675 | * Checking the queues requires lock_rsb, though, for which we'd need | ||
4676 | * to release the rsbtbl lock. This would make iterating through all | ||
4677 | * rsb's very inefficient. So, we rely on earlier recovery routines | ||
4678 | * to set RECOVER_GRANT on any rsb's that we should attempt to grant | ||
4679 | * locks for. | ||
4680 | */ | ||
4681 | |||
4682 | void dlm_recover_grant(struct dlm_ls *ls) | ||
4525 | { | 4683 | { |
4526 | struct dlm_rsb *r; | 4684 | struct dlm_rsb *r; |
4527 | int bucket = 0; | 4685 | int bucket = 0; |
4686 | unsigned int count = 0; | ||
4687 | unsigned int rsb_count = 0; | ||
4688 | unsigned int lkb_count = 0; | ||
4528 | 4689 | ||
4529 | while (1) { | 4690 | while (1) { |
4530 | r = find_purged_rsb(ls, bucket); | 4691 | r = find_grant_rsb(ls, bucket); |
4531 | if (!r) { | 4692 | if (!r) { |
4532 | if (bucket == ls->ls_rsbtbl_size - 1) | 4693 | if (bucket == ls->ls_rsbtbl_size - 1) |
4533 | break; | 4694 | break; |
4534 | bucket++; | 4695 | bucket++; |
4535 | continue; | 4696 | continue; |
4536 | } | 4697 | } |
4698 | rsb_count++; | ||
4699 | count = 0; | ||
4537 | lock_rsb(r); | 4700 | lock_rsb(r); |
4538 | if (is_master(r)) { | 4701 | grant_pending_locks(r, &count); |
4539 | grant_pending_locks(r); | 4702 | lkb_count += count; |
4540 | confirm_master(r, 0); | 4703 | confirm_master(r, 0); |
4541 | } | ||
4542 | unlock_rsb(r); | 4704 | unlock_rsb(r); |
4543 | put_rsb(r); | 4705 | put_rsb(r); |
4544 | schedule(); | 4706 | cond_resched(); |
4545 | } | 4707 | } |
4708 | |||
4709 | if (lkb_count) | ||
4710 | log_debug(ls, "dlm_recover_grant %u locks on %u resources", | ||
4711 | lkb_count, rsb_count); | ||
4546 | } | 4712 | } |
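
The block comment above dlm_recover_grant() explains the design: rather than taking every master rsb's lock to inspect its queues, earlier recovery steps mark candidates with RSB_RECOVER_GRANT, and the grant pass walks the table buckets picking off only flagged entries, clearing the flag before processing each one. A rough stand-alone model of that flag-driven scan, with a plain two-entry-per-bucket array standing in for the rsb hash table (all names here are invented for the sketch, and the is_master check is omitted):

#include <stdbool.h>
#include <stdio.h>

#define NBUCKETS 4

struct rsb {
    int id;
    bool recover_grant;   /* set earlier, e.g. when dead locks were purged */
};

static struct rsb table[NBUCKETS][2] = {
    { {1, false}, {2, true}  },
    { {3, false}, {4, false} },
    { {5, true},  {6, false} },
    { {7, false}, {8, false} },
};

/* return the next flagged entry in this bucket, clearing its flag */
static struct rsb *find_grant_rsb(int bucket)
{
    for (int i = 0; i < 2; i++) {
        if (!table[bucket][i].recover_grant)
            continue;
        table[bucket][i].recover_grant = false;
        return &table[bucket][i];
    }
    return NULL;
}

int main(void)
{
    int bucket = 0, count = 0;

    while (1) {
        struct rsb *r = find_grant_rsb(bucket);
        if (!r) {
            if (bucket == NBUCKETS - 1)
                break;
            bucket++;
            continue;
        }
        /* here the kernel would lock_rsb(), grant_pending_locks(), ... */
        printf("grant pass on rsb %d\n", r->id);
        count++;
    }
    printf("processed %d flagged rsbs\n", count);
    return 0;
}
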
4547 | 4713 | ||
4548 | static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid, | 4714 | static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid, |
@@ -4631,6 +4797,7 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4631 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; | 4797 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; |
4632 | struct dlm_rsb *r; | 4798 | struct dlm_rsb *r; |
4633 | struct dlm_lkb *lkb; | 4799 | struct dlm_lkb *lkb; |
4800 | uint32_t remid = 0; | ||
4634 | int error; | 4801 | int error; |
4635 | 4802 | ||
4636 | if (rl->rl_parent_lkid) { | 4803 | if (rl->rl_parent_lkid) { |
@@ -4638,14 +4805,31 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4638 | goto out; | 4805 | goto out; |
4639 | } | 4806 | } |
4640 | 4807 | ||
4641 | error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), | 4808 | remid = le32_to_cpu(rl->rl_lkid); |
4642 | R_MASTER, &r); | 4809 | |
4810 | /* In general we expect the rsb returned to be R_MASTER, but we don't | ||
4811 | have to require it. Recovery of masters on one node can overlap | ||
4812 | recovery of locks on another node, so one node can send us MSTCPY | ||
4813 | locks before we've made ourselves master of this rsb. We can still | ||
4814 | add new MSTCPY locks that we receive here without any harm; when | ||
4815 | we make ourselves master, dlm_recover_masters() won't touch the | ||
4816 | MSTCPY locks we've received early. */ | ||
4817 | |||
4818 | error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), 0, &r); | ||
4643 | if (error) | 4819 | if (error) |
4644 | goto out; | 4820 | goto out; |
4645 | 4821 | ||
4822 | if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) { | ||
4823 | log_error(ls, "dlm_recover_master_copy remote %d %x not dir", | ||
4824 | rc->rc_header.h_nodeid, remid); | ||
4825 | error = -EBADR; | ||
4826 | put_rsb(r); | ||
4827 | goto out; | ||
4828 | } | ||
4829 | |||
4646 | lock_rsb(r); | 4830 | lock_rsb(r); |
4647 | 4831 | ||
4648 | lkb = search_remid(r, rc->rc_header.h_nodeid, le32_to_cpu(rl->rl_lkid)); | 4832 | lkb = search_remid(r, rc->rc_header.h_nodeid, remid); |
4649 | if (lkb) { | 4833 | if (lkb) { |
4650 | error = -EEXIST; | 4834 | error = -EEXIST; |
4651 | goto out_remid; | 4835 | goto out_remid; |
@@ -4664,19 +4848,25 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4664 | attach_lkb(r, lkb); | 4848 | attach_lkb(r, lkb); |
4665 | add_lkb(r, lkb, rl->rl_status); | 4849 | add_lkb(r, lkb, rl->rl_status); |
4666 | error = 0; | 4850 | error = 0; |
4851 | ls->ls_recover_locks_in++; | ||
4852 | |||
4853 | if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) | ||
4854 | rsb_set_flag(r, RSB_RECOVER_GRANT); | ||
4667 | 4855 | ||
4668 | out_remid: | 4856 | out_remid: |
4669 | /* this is the new value returned to the lock holder for | 4857 | /* this is the new value returned to the lock holder for |
4670 | saving in its process-copy lkb */ | 4858 | saving in its process-copy lkb */ |
4671 | rl->rl_remid = cpu_to_le32(lkb->lkb_id); | 4859 | rl->rl_remid = cpu_to_le32(lkb->lkb_id); |
4672 | 4860 | ||
4861 | lkb->lkb_recover_seq = ls->ls_recover_seq; | ||
4862 | |||
4673 | out_unlock: | 4863 | out_unlock: |
4674 | unlock_rsb(r); | 4864 | unlock_rsb(r); |
4675 | put_rsb(r); | 4865 | put_rsb(r); |
4676 | out: | 4866 | out: |
4677 | if (error) | 4867 | if (error && error != -EEXIST) |
4678 | log_debug(ls, "recover_master_copy %d %x", error, | 4868 | log_debug(ls, "dlm_recover_master_copy remote %d %x error %d", |
4679 | le32_to_cpu(rl->rl_lkid)); | 4869 | rc->rc_header.h_nodeid, remid, error); |
4680 | rl->rl_result = cpu_to_le32(error); | 4870 | rl->rl_result = cpu_to_le32(error); |
4681 | return error; | 4871 | return error; |
4682 | } | 4872 | } |
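
dlm_recover_master_copy() now stamps each accepted master-copy lkb with ls_recover_seq and flags the rsb RSB_RECOVER_GRANT when it has waiting or converting locks; purge_mstcpy_list() earlier in this file uses that stamp to leave alone lkbs added during the current recovery while still dropping leftovers from an aborted one. Reduced to the stamp-and-skip essentials (simplified names, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct lkb {
    int id;
    uint32_t recover_seq;   /* recovery generation that (re)created this MSTCPY */
};

static uint32_t ls_recover_seq = 12;

/* dlm_recover_master_copy(): stamp the lkb with the current generation */
static void accept_master_copy(struct lkb *lkb)
{
    lkb->recover_seq = ls_recover_seq;
}

/* purge_mstcpy_list(): skip lkbs added during the current recovery */
static int should_purge(const struct lkb *lkb)
{
    return lkb->recover_seq != ls_recover_seq;
}

int main(void)
{
    struct lkb fresh = { 1, 0 }, stale = { 2, 11 };

    accept_master_copy(&fresh);
    printf("lkb %d purge=%d\n", fresh.id, should_purge(&fresh)); /* 0: keep */
    printf("lkb %d purge=%d\n", stale.id, should_purge(&stale)); /* 1: purge */
    return 0;
}
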
@@ -4687,41 +4877,52 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4687 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; | 4877 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; |
4688 | struct dlm_rsb *r; | 4878 | struct dlm_rsb *r; |
4689 | struct dlm_lkb *lkb; | 4879 | struct dlm_lkb *lkb; |
4690 | int error; | 4880 | uint32_t lkid, remid; |
4881 | int error, result; | ||
4882 | |||
4883 | lkid = le32_to_cpu(rl->rl_lkid); | ||
4884 | remid = le32_to_cpu(rl->rl_remid); | ||
4885 | result = le32_to_cpu(rl->rl_result); | ||
4691 | 4886 | ||
4692 | error = find_lkb(ls, le32_to_cpu(rl->rl_lkid), &lkb); | 4887 | error = find_lkb(ls, lkid, &lkb); |
4693 | if (error) { | 4888 | if (error) { |
4694 | log_error(ls, "recover_process_copy no lkid %x", | 4889 | log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d", |
4695 | le32_to_cpu(rl->rl_lkid)); | 4890 | lkid, rc->rc_header.h_nodeid, remid, result); |
4696 | return error; | 4891 | return error; |
4697 | } | 4892 | } |
4698 | 4893 | ||
4699 | DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb);); | ||
4700 | |||
4701 | error = le32_to_cpu(rl->rl_result); | ||
4702 | |||
4703 | r = lkb->lkb_resource; | 4894 | r = lkb->lkb_resource; |
4704 | hold_rsb(r); | 4895 | hold_rsb(r); |
4705 | lock_rsb(r); | 4896 | lock_rsb(r); |
4706 | 4897 | ||
4707 | switch (error) { | 4898 | if (!is_process_copy(lkb)) { |
4899 | log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d", | ||
4900 | lkid, rc->rc_header.h_nodeid, remid, result); | ||
4901 | dlm_dump_rsb(r); | ||
4902 | unlock_rsb(r); | ||
4903 | put_rsb(r); | ||
4904 | dlm_put_lkb(lkb); | ||
4905 | return -EINVAL; | ||
4906 | } | ||
4907 | |||
4908 | switch (result) { | ||
4708 | case -EBADR: | 4909 | case -EBADR: |
4709 | /* There's a chance the new master received our lock before | 4910 | /* There's a chance the new master received our lock before |
4710 | dlm_recover_master_reply(), this wouldn't happen if we did | 4911 | dlm_recover_master_reply(), this wouldn't happen if we did |
4711 | a barrier between recover_masters and recover_locks. */ | 4912 | a barrier between recover_masters and recover_locks. */ |
4712 | log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id, | 4913 | |
4713 | (unsigned long)r, r->res_name); | 4914 | log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d", |
4915 | lkid, rc->rc_header.h_nodeid, remid, result); | ||
4916 | |||
4714 | dlm_send_rcom_lock(r, lkb); | 4917 | dlm_send_rcom_lock(r, lkb); |
4715 | goto out; | 4918 | goto out; |
4716 | case -EEXIST: | 4919 | case -EEXIST: |
4717 | log_debug(ls, "master copy exists %x", lkb->lkb_id); | ||
4718 | /* fall through */ | ||
4719 | case 0: | 4920 | case 0: |
4720 | lkb->lkb_remid = le32_to_cpu(rl->rl_remid); | 4921 | lkb->lkb_remid = remid; |
4721 | break; | 4922 | break; |
4722 | default: | 4923 | default: |
4723 | log_error(ls, "dlm_recover_process_copy unknown error %d %x", | 4924 | log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk", |
4724 | error, lkb->lkb_id); | 4925 | lkid, rc->rc_header.h_nodeid, remid, result); |
4725 | } | 4926 | } |
4726 | 4927 | ||
4727 | /* an ack for dlm_recover_locks() which waits for replies from | 4928 | /* an ack for dlm_recover_locks() which waits for replies from |
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 1a255307f6ff..c8b226c62807 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h | |||
@@ -15,7 +15,8 @@ | |||
15 | 15 | ||
16 | void dlm_dump_rsb(struct dlm_rsb *r); | 16 | void dlm_dump_rsb(struct dlm_rsb *r); |
17 | void dlm_print_lkb(struct dlm_lkb *lkb); | 17 | void dlm_print_lkb(struct dlm_lkb *lkb); |
18 | void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms); | 18 | void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, |
19 | uint32_t saved_seq); | ||
19 | void dlm_receive_buffer(union dlm_packet *p, int nodeid); | 20 | void dlm_receive_buffer(union dlm_packet *p, int nodeid); |
20 | int dlm_modes_compat(int mode1, int mode2); | 21 | int dlm_modes_compat(int mode1, int mode2); |
21 | void dlm_put_rsb(struct dlm_rsb *r); | 22 | void dlm_put_rsb(struct dlm_rsb *r); |
@@ -31,9 +32,9 @@ void dlm_adjust_timeouts(struct dlm_ls *ls); | |||
31 | int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len, | 32 | int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len, |
32 | unsigned int flags, struct dlm_rsb **r_ret); | 33 | unsigned int flags, struct dlm_rsb **r_ret); |
33 | 34 | ||
34 | int dlm_purge_locks(struct dlm_ls *ls); | 35 | void dlm_recover_purge(struct dlm_ls *ls); |
35 | void dlm_purge_mstcpy_locks(struct dlm_rsb *r); | 36 | void dlm_purge_mstcpy_locks(struct dlm_rsb *r); |
36 | void dlm_grant_after_purge(struct dlm_ls *ls); | 37 | void dlm_recover_grant(struct dlm_ls *ls); |
37 | int dlm_recover_waiters_post(struct dlm_ls *ls); | 38 | int dlm_recover_waiters_post(struct dlm_ls *ls); |
38 | void dlm_recover_waiters_pre(struct dlm_ls *ls); | 39 | void dlm_recover_waiters_pre(struct dlm_ls *ls); |
39 | int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc); | 40 | int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc); |
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index a1ea25face82..ca506abbdd3b 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c | |||
@@ -74,6 +74,19 @@ static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len) | |||
74 | return len; | 74 | return len; |
75 | } | 75 | } |
76 | 76 | ||
77 | static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf) | ||
78 | { | ||
79 | return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls)); | ||
80 | } | ||
81 | |||
82 | static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len) | ||
83 | { | ||
84 | int val = simple_strtoul(buf, NULL, 0); | ||
85 | if (val == 1) | ||
86 | set_bit(LSFL_NODIR, &ls->ls_flags); | ||
87 | return len; | ||
88 | } | ||
89 | |||
77 | static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf) | 90 | static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf) |
78 | { | 91 | { |
79 | uint32_t status = dlm_recover_status(ls); | 92 | uint32_t status = dlm_recover_status(ls); |
@@ -107,6 +120,12 @@ static struct dlm_attr dlm_attr_id = { | |||
107 | .store = dlm_id_store | 120 | .store = dlm_id_store |
108 | }; | 121 | }; |
109 | 122 | ||
123 | static struct dlm_attr dlm_attr_nodir = { | ||
124 | .attr = {.name = "nodir", .mode = S_IRUGO | S_IWUSR}, | ||
125 | .show = dlm_nodir_show, | ||
126 | .store = dlm_nodir_store | ||
127 | }; | ||
128 | |||
110 | static struct dlm_attr dlm_attr_recover_status = { | 129 | static struct dlm_attr dlm_attr_recover_status = { |
111 | .attr = {.name = "recover_status", .mode = S_IRUGO}, | 130 | .attr = {.name = "recover_status", .mode = S_IRUGO}, |
112 | .show = dlm_recover_status_show | 131 | .show = dlm_recover_status_show |
@@ -121,6 +140,7 @@ static struct attribute *dlm_attrs[] = { | |||
121 | &dlm_attr_control.attr, | 140 | &dlm_attr_control.attr, |
122 | &dlm_attr_event.attr, | 141 | &dlm_attr_event.attr, |
123 | &dlm_attr_id.attr, | 142 | &dlm_attr_id.attr, |
143 | &dlm_attr_nodir.attr, | ||
124 | &dlm_attr_recover_status.attr, | 144 | &dlm_attr_recover_status.attr, |
125 | &dlm_attr_recover_nodeid.attr, | 145 | &dlm_attr_recover_nodeid.attr, |
126 | NULL, | 146 | NULL, |
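
The lockspace.c hunks add a per-lockspace sysfs attribute named nodir: reading it reports dlm_no_directory(ls), and writing 1 sets LSFL_NODIR. A hedged usage sketch follows; the /sys/kernel/dlm/<lockspace>/ path and the assumption that the userspace cluster daemon writes the attribute while the lockspace is being set up are not shown in this diff.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    char path[256];
    int fd;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <lockspace>\n", argv[0]);
        return 1;
    }
    /* assumed sysfs layout: /sys/kernel/dlm/<lockspace>/nodir */
    snprintf(path, sizeof(path), "/sys/kernel/dlm/%s/nodir", argv[1]);

    fd = open(path, O_WRONLY);
    if (fd < 0) {
        perror(path);
        return 1;
    }
    /* "1" sets LSFL_NODIR; reading the file back reports 0 or 1 */
    if (write(fd, "1", 1) != 1)
        perror("write");
    close(fd);
    return 0;
}
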
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 133ef6dc7cb7..5c1b0e38c7a4 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c | |||
@@ -142,6 +142,7 @@ struct writequeue_entry { | |||
142 | 142 | ||
143 | static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT]; | 143 | static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT]; |
144 | static int dlm_local_count; | 144 | static int dlm_local_count; |
145 | static int dlm_allow_conn; | ||
145 | 146 | ||
146 | /* Work queues */ | 147 | /* Work queues */ |
147 | static struct workqueue_struct *recv_workqueue; | 148 | static struct workqueue_struct *recv_workqueue; |
@@ -710,6 +711,13 @@ static int tcp_accept_from_sock(struct connection *con) | |||
710 | struct connection *newcon; | 711 | struct connection *newcon; |
711 | struct connection *addcon; | 712 | struct connection *addcon; |
712 | 713 | ||
714 | mutex_lock(&connections_lock); | ||
715 | if (!dlm_allow_conn) { | ||
716 | mutex_unlock(&connections_lock); | ||
717 | return -1; | ||
718 | } | ||
719 | mutex_unlock(&connections_lock); | ||
720 | |||
713 | memset(&peeraddr, 0, sizeof(peeraddr)); | 721 | memset(&peeraddr, 0, sizeof(peeraddr)); |
714 | result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM, | 722 | result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM, |
715 | IPPROTO_TCP, &newsock); | 723 | IPPROTO_TCP, &newsock); |
@@ -1503,6 +1511,7 @@ void dlm_lowcomms_stop(void) | |||
1503 | socket activity. | 1511 | socket activity. |
1504 | */ | 1512 | */ |
1505 | mutex_lock(&connections_lock); | 1513 | mutex_lock(&connections_lock); |
1514 | dlm_allow_conn = 0; | ||
1506 | foreach_conn(stop_conn); | 1515 | foreach_conn(stop_conn); |
1507 | mutex_unlock(&connections_lock); | 1516 | mutex_unlock(&connections_lock); |
1508 | 1517 | ||
@@ -1530,7 +1539,7 @@ int dlm_lowcomms_start(void) | |||
1530 | if (!dlm_local_count) { | 1539 | if (!dlm_local_count) { |
1531 | error = -ENOTCONN; | 1540 | error = -ENOTCONN; |
1532 | log_print("no local IP address has been set"); | 1541 | log_print("no local IP address has been set"); |
1533 | goto out; | 1542 | goto fail; |
1534 | } | 1543 | } |
1535 | 1544 | ||
1536 | error = -ENOMEM; | 1545 | error = -ENOMEM; |
@@ -1538,7 +1547,13 @@ int dlm_lowcomms_start(void) | |||
1538 | __alignof__(struct connection), 0, | 1547 | __alignof__(struct connection), 0, |
1539 | NULL); | 1548 | NULL); |
1540 | if (!con_cache) | 1549 | if (!con_cache) |
1541 | goto out; | 1550 | goto fail; |
1551 | |||
1552 | error = work_start(); | ||
1553 | if (error) | ||
1554 | goto fail_destroy; | ||
1555 | |||
1556 | dlm_allow_conn = 1; | ||
1542 | 1557 | ||
1543 | /* Start listening */ | 1558 | /* Start listening */ |
1544 | if (dlm_config.ci_protocol == 0) | 1559 | if (dlm_config.ci_protocol == 0) |
@@ -1548,20 +1563,17 @@ int dlm_lowcomms_start(void) | |||
1548 | if (error) | 1563 | if (error) |
1549 | goto fail_unlisten; | 1564 | goto fail_unlisten; |
1550 | 1565 | ||
1551 | error = work_start(); | ||
1552 | if (error) | ||
1553 | goto fail_unlisten; | ||
1554 | |||
1555 | return 0; | 1566 | return 0; |
1556 | 1567 | ||
1557 | fail_unlisten: | 1568 | fail_unlisten: |
1569 | dlm_allow_conn = 0; | ||
1558 | con = nodeid2con(0,0); | 1570 | con = nodeid2con(0,0); |
1559 | if (con) { | 1571 | if (con) { |
1560 | close_connection(con, false); | 1572 | close_connection(con, false); |
1561 | kmem_cache_free(con_cache, con); | 1573 | kmem_cache_free(con_cache, con); |
1562 | } | 1574 | } |
1575 | fail_destroy: | ||
1563 | kmem_cache_destroy(con_cache); | 1576 | kmem_cache_destroy(con_cache); |
1564 | 1577 | fail: | |
1565 | out: | ||
1566 | return error; | 1578 | return error; |
1567 | } | 1579 | } |
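
dlm_lowcomms_start() is reordered so the connection cache and work queues exist before listening starts, with dlm_allow_conn gating tcp_accept_from_sock() and a separate unwind label per stage (fail, fail_destroy, fail_unlisten). The same staged-goto idiom in a stand-alone form; create_cache(), start_workqueues() and the other helpers below are placeholders, not the kernel functions.

#include <stdio.h>
#include <stdlib.h>

static void *create_cache(void) { return malloc(64); }
static int   start_workqueues(void) { return 0; }
static int   start_listening(void) { return 0; }   /* e.g. tcp/sctp listen */
static void  stop_listening(void) { }              /* tear down a partial listener */
static void  destroy_cache(void *c) { free(c); }

static int allow_conn;   /* plays the role of dlm_allow_conn */

int comms_start(void)
{
    void *cache;
    int error;

    cache = create_cache();
    if (!cache)
        goto fail;

    error = start_workqueues();
    if (error)
        goto fail_destroy;

    /* only accept incoming connections once everything else is ready */
    allow_conn = 1;

    error = start_listening();
    if (error)
        goto fail_unlisten;

    return 0;

fail_unlisten:
    allow_conn = 0;
    stop_listening();
fail_destroy:
    destroy_cache(cache);
fail:
    return -1;
}

int main(void)
{
    printf("comms_start: %d\n", comms_start());
    return 0;
}
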
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index da64df7576e1..7cd24bccd4fe 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c | |||
@@ -21,21 +21,19 @@ static struct kmem_cache *rsb_cache; | |||
21 | 21 | ||
22 | int __init dlm_memory_init(void) | 22 | int __init dlm_memory_init(void) |
23 | { | 23 | { |
24 | int ret = 0; | ||
25 | |||
26 | lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb), | 24 | lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb), |
27 | __alignof__(struct dlm_lkb), 0, NULL); | 25 | __alignof__(struct dlm_lkb), 0, NULL); |
28 | if (!lkb_cache) | 26 | if (!lkb_cache) |
29 | ret = -ENOMEM; | 27 | return -ENOMEM; |
30 | 28 | ||
31 | rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb), | 29 | rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb), |
32 | __alignof__(struct dlm_rsb), 0, NULL); | 30 | __alignof__(struct dlm_rsb), 0, NULL); |
33 | if (!rsb_cache) { | 31 | if (!rsb_cache) { |
34 | kmem_cache_destroy(lkb_cache); | 32 | kmem_cache_destroy(lkb_cache); |
35 | ret = -ENOMEM; | 33 | return -ENOMEM; |
36 | } | 34 | } |
37 | 35 | ||
38 | return ret; | 36 | return 0; |
39 | } | 37 | } |
40 | 38 | ||
41 | void dlm_memory_exit(void) | 39 | void dlm_memory_exit(void) |
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c index ac5c616c9696..64d3e2b958c7 100644 --- a/fs/dlm/rcom.c +++ b/fs/dlm/rcom.c | |||
@@ -486,47 +486,50 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) | |||
486 | return 0; | 486 | return 0; |
487 | } | 487 | } |
488 | 488 | ||
489 | static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc) | 489 | /* Called by dlm_recv; corresponds to dlm_receive_message() but special |
490 | recovery-only comms are sent through here. */ | ||
491 | |||
492 | void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) | ||
490 | { | 493 | { |
494 | int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock); | ||
495 | int stop, reply = 0, lock = 0; | ||
496 | uint32_t status; | ||
491 | uint64_t seq; | 497 | uint64_t seq; |
492 | int rv = 0; | ||
493 | 498 | ||
494 | switch (rc->rc_type) { | 499 | switch (rc->rc_type) { |
500 | case DLM_RCOM_LOCK: | ||
501 | lock = 1; | ||
502 | break; | ||
503 | case DLM_RCOM_LOCK_REPLY: | ||
504 | lock = 1; | ||
505 | reply = 1; | ||
506 | break; | ||
495 | case DLM_RCOM_STATUS_REPLY: | 507 | case DLM_RCOM_STATUS_REPLY: |
496 | case DLM_RCOM_NAMES_REPLY: | 508 | case DLM_RCOM_NAMES_REPLY: |
497 | case DLM_RCOM_LOOKUP_REPLY: | 509 | case DLM_RCOM_LOOKUP_REPLY: |
498 | case DLM_RCOM_LOCK_REPLY: | 510 | reply = 1; |
499 | spin_lock(&ls->ls_recover_lock); | 511 | }; |
500 | seq = ls->ls_recover_seq; | ||
501 | spin_unlock(&ls->ls_recover_lock); | ||
502 | if (rc->rc_seq_reply != seq) { | ||
503 | log_debug(ls, "ignoring old reply %x from %d " | ||
504 | "seq_reply %llx expect %llx", | ||
505 | rc->rc_type, rc->rc_header.h_nodeid, | ||
506 | (unsigned long long)rc->rc_seq_reply, | ||
507 | (unsigned long long)seq); | ||
508 | rv = 1; | ||
509 | } | ||
510 | } | ||
511 | return rv; | ||
512 | } | ||
513 | |||
514 | /* Called by dlm_recv; corresponds to dlm_receive_message() but special | ||
515 | recovery-only comms are sent through here. */ | ||
516 | 512 | ||
517 | void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) | 513 | spin_lock(&ls->ls_recover_lock); |
518 | { | 514 | status = ls->ls_recover_status; |
519 | int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock); | 515 | stop = test_bit(LSFL_RECOVERY_STOP, &ls->ls_flags); |
516 | seq = ls->ls_recover_seq; | ||
517 | spin_unlock(&ls->ls_recover_lock); | ||
520 | 518 | ||
521 | if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { | 519 | if ((stop && (rc->rc_type != DLM_RCOM_STATUS)) || |
522 | log_debug(ls, "ignoring recovery message %x from %d", | 520 | (reply && (rc->rc_seq_reply != seq)) || |
523 | rc->rc_type, nodeid); | 521 | (lock && !(status & DLM_RS_DIR))) { |
522 | log_limit(ls, "dlm_receive_rcom ignore msg %d " | ||
523 | "from %d %llu %llu recover seq %llu sts %x gen %u", | ||
524 | rc->rc_type, | ||
525 | nodeid, | ||
526 | (unsigned long long)rc->rc_seq, | ||
527 | (unsigned long long)rc->rc_seq_reply, | ||
528 | (unsigned long long)seq, | ||
529 | status, ls->ls_generation); | ||
524 | goto out; | 530 | goto out; |
525 | } | 531 | } |
526 | 532 | ||
527 | if (is_old_reply(ls, rc)) | ||
528 | goto out; | ||
529 | |||
530 | switch (rc->rc_type) { | 533 | switch (rc->rc_type) { |
531 | case DLM_RCOM_STATUS: | 534 | case DLM_RCOM_STATUS: |
532 | receive_rcom_status(ls, rc); | 535 | receive_rcom_status(ls, rc); |
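
The rcom.c rewrite folds the old is_old_reply() helper and the recovery-stopped test into one predicate evaluated under ls_recover_lock: drop the message if recovery is stopped (unless it is a STATUS request), if it is a reply whose seq_reply does not match the current recovery sequence, or if it carries lock data before the directory phase (DLM_RS_DIR) has completed. A compact restatement of that predicate as stand-alone code; the struct, the flag value and RCOM_STATUS below are simplified stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RS_DIR 0x4   /* placeholder for DLM_RS_DIR */

enum { RCOM_STATUS = 1 };

struct rcom {
    int type;            /* STATUS, LOCK, LOCK_REPLY, ... */
    bool is_reply;
    bool carries_lock;
    uint64_t seq_reply;
};

static bool ignore_rcom(const struct rcom *rc, bool recovery_stopped,
                        uint64_t recover_seq, uint32_t recover_status)
{
    if (recovery_stopped && rc->type != RCOM_STATUS)
        return true;                   /* stopped: only STATUS gets through */
    if (rc->is_reply && rc->seq_reply != recover_seq)
        return true;                   /* stale reply from an earlier pass */
    if (rc->carries_lock && !(recover_status & RS_DIR))
        return true;                   /* lock data before the directory is done */
    return false;
}

int main(void)
{
    struct rcom stale = { .type = 2, .is_reply = true, .seq_reply = 4 };

    printf("%d\n", ignore_rcom(&stale, false, 5, RS_DIR));  /* 1: ignored */
    return 0;
}
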
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 34d5adf1fce7..7554e4dac6bb 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c | |||
@@ -339,9 +339,12 @@ static void set_lock_master(struct list_head *queue, int nodeid) | |||
339 | { | 339 | { |
340 | struct dlm_lkb *lkb; | 340 | struct dlm_lkb *lkb; |
341 | 341 | ||
342 | list_for_each_entry(lkb, queue, lkb_statequeue) | 342 | list_for_each_entry(lkb, queue, lkb_statequeue) { |
343 | if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) | 343 | if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) { |
344 | lkb->lkb_nodeid = nodeid; | 344 | lkb->lkb_nodeid = nodeid; |
345 | lkb->lkb_remid = 0; | ||
346 | } | ||
347 | } | ||
345 | } | 348 | } |
346 | 349 | ||
347 | static void set_master_lkbs(struct dlm_rsb *r) | 350 | static void set_master_lkbs(struct dlm_rsb *r) |
@@ -354,18 +357,16 @@ static void set_master_lkbs(struct dlm_rsb *r) | |||
354 | /* | 357 | /* |
355 | * Propagate the new master nodeid to locks | 358 | * Propagate the new master nodeid to locks |
356 | * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider. | 359 | * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider. |
357 | * The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which | 360 | * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which |
358 | * rsb's to consider. | 361 | * rsb's to consider. |
359 | */ | 362 | */ |
360 | 363 | ||
361 | static void set_new_master(struct dlm_rsb *r, int nodeid) | 364 | static void set_new_master(struct dlm_rsb *r, int nodeid) |
362 | { | 365 | { |
363 | lock_rsb(r); | ||
364 | r->res_nodeid = nodeid; | 366 | r->res_nodeid = nodeid; |
365 | set_master_lkbs(r); | 367 | set_master_lkbs(r); |
366 | rsb_set_flag(r, RSB_NEW_MASTER); | 368 | rsb_set_flag(r, RSB_NEW_MASTER); |
367 | rsb_set_flag(r, RSB_NEW_MASTER2); | 369 | rsb_set_flag(r, RSB_NEW_MASTER2); |
368 | unlock_rsb(r); | ||
369 | } | 370 | } |
370 | 371 | ||
371 | /* | 372 | /* |
@@ -376,9 +377,9 @@ static void set_new_master(struct dlm_rsb *r, int nodeid) | |||
376 | static int recover_master(struct dlm_rsb *r) | 377 | static int recover_master(struct dlm_rsb *r) |
377 | { | 378 | { |
378 | struct dlm_ls *ls = r->res_ls; | 379 | struct dlm_ls *ls = r->res_ls; |
379 | int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid(); | 380 | int error, ret_nodeid; |
380 | 381 | int our_nodeid = dlm_our_nodeid(); | |
381 | dir_nodeid = dlm_dir_nodeid(r); | 382 | int dir_nodeid = dlm_dir_nodeid(r); |
382 | 383 | ||
383 | if (dir_nodeid == our_nodeid) { | 384 | if (dir_nodeid == our_nodeid) { |
384 | error = dlm_dir_lookup(ls, our_nodeid, r->res_name, | 385 | error = dlm_dir_lookup(ls, our_nodeid, r->res_name, |
@@ -388,7 +389,9 @@ static int recover_master(struct dlm_rsb *r) | |||
388 | 389 | ||
389 | if (ret_nodeid == our_nodeid) | 390 | if (ret_nodeid == our_nodeid) |
390 | ret_nodeid = 0; | 391 | ret_nodeid = 0; |
392 | lock_rsb(r); | ||
391 | set_new_master(r, ret_nodeid); | 393 | set_new_master(r, ret_nodeid); |
394 | unlock_rsb(r); | ||
392 | } else { | 395 | } else { |
393 | recover_list_add(r); | 396 | recover_list_add(r); |
394 | error = dlm_send_rcom_lookup(r, dir_nodeid); | 397 | error = dlm_send_rcom_lookup(r, dir_nodeid); |
@@ -398,24 +401,33 @@ static int recover_master(struct dlm_rsb *r) | |||
398 | } | 401 | } |
399 | 402 | ||
400 | /* | 403 | /* |
401 | * When not using a directory, most resource names will hash to a new static | 404 | * All MSTCPY locks are purged and rebuilt, even if the master stayed the same. |
402 | * master nodeid and the resource will need to be remastered. | 405 | * This is necessary because recovery can be started, aborted and restarted, |
406 | * causing the master nodeid to briefly change during the aborted recovery, and | ||
407 | * change back to the original value in the second recovery. The MSTCPY locks | ||
408 | * may or may not have been purged during the aborted recovery. Another node | ||
409 | * with an outstanding request on the waiters list and a request reply saved in the | ||
410 | * requestqueue cannot know whether it should ignore the reply and resend the | ||
411 | * request, or accept the reply and complete the request. It must do the | ||
412 | * former if the remote node purged MSTCPY locks, and it must do the latter if | ||
413 | * the remote node did not. This is solved by always purging MSTCPY locks, in | ||
414 | * which case, the request reply would always be ignored and the request | ||
415 | * resent. | ||
403 | */ | 416 | */ |
404 | 417 | ||
405 | static int recover_master_static(struct dlm_rsb *r) | 418 | static int recover_master_static(struct dlm_rsb *r) |
406 | { | 419 | { |
407 | int master = dlm_dir_nodeid(r); | 420 | int dir_nodeid = dlm_dir_nodeid(r); |
421 | int new_master = dir_nodeid; | ||
408 | 422 | ||
409 | if (master == dlm_our_nodeid()) | 423 | if (dir_nodeid == dlm_our_nodeid()) |
410 | master = 0; | 424 | new_master = 0; |
411 | 425 | ||
412 | if (r->res_nodeid != master) { | 426 | lock_rsb(r); |
413 | if (is_master(r)) | 427 | dlm_purge_mstcpy_locks(r); |
414 | dlm_purge_mstcpy_locks(r); | 428 | set_new_master(r, new_master); |
415 | set_new_master(r, master); | 429 | unlock_rsb(r); |
416 | return 1; | 430 | return 1; |
417 | } | ||
418 | return 0; | ||
419 | } | 431 | } |
420 | 432 | ||
421 | /* | 433 | /* |
@@ -481,7 +493,9 @@ int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
481 | if (nodeid == dlm_our_nodeid()) | 493 | if (nodeid == dlm_our_nodeid()) |
482 | nodeid = 0; | 494 | nodeid = 0; |
483 | 495 | ||
496 | lock_rsb(r); | ||
484 | set_new_master(r, nodeid); | 497 | set_new_master(r, nodeid); |
498 | unlock_rsb(r); | ||
485 | recover_list_del(r); | 499 | recover_list_del(r); |
486 | 500 | ||
487 | if (recover_list_empty(ls)) | 501 | if (recover_list_empty(ls)) |
@@ -556,8 +570,6 @@ int dlm_recover_locks(struct dlm_ls *ls) | |||
556 | struct dlm_rsb *r; | 570 | struct dlm_rsb *r; |
557 | int error, count = 0; | 571 | int error, count = 0; |
558 | 572 | ||
559 | log_debug(ls, "dlm_recover_locks"); | ||
560 | |||
561 | down_read(&ls->ls_root_sem); | 573 | down_read(&ls->ls_root_sem); |
562 | list_for_each_entry(r, &ls->ls_root_list, res_root_list) { | 574 | list_for_each_entry(r, &ls->ls_root_list, res_root_list) { |
563 | if (is_master(r)) { | 575 | if (is_master(r)) { |
@@ -584,7 +596,7 @@ int dlm_recover_locks(struct dlm_ls *ls) | |||
584 | } | 596 | } |
585 | up_read(&ls->ls_root_sem); | 597 | up_read(&ls->ls_root_sem); |
586 | 598 | ||
587 | log_debug(ls, "dlm_recover_locks %d locks", count); | 599 | log_debug(ls, "dlm_recover_locks %d out", count); |
588 | 600 | ||
589 | error = dlm_wait_function(ls, &recover_list_empty); | 601 | error = dlm_wait_function(ls, &recover_list_empty); |
590 | out: | 602 | out: |
@@ -721,21 +733,19 @@ static void recover_conversion(struct dlm_rsb *r) | |||
721 | } | 733 | } |
722 | 734 | ||
723 | /* We've become the new master for this rsb and waiting/converting locks may | 735 | /* We've become the new master for this rsb and waiting/converting locks may |
724 | need to be granted in dlm_grant_after_purge() due to locks that may have | 736 | need to be granted in dlm_recover_grant() due to locks that may have |
725 | existed from a removed node. */ | 737 | existed from a removed node. */ |
726 | 738 | ||
727 | static void set_locks_purged(struct dlm_rsb *r) | 739 | static void recover_grant(struct dlm_rsb *r) |
728 | { | 740 | { |
729 | if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) | 741 | if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) |
730 | rsb_set_flag(r, RSB_LOCKS_PURGED); | 742 | rsb_set_flag(r, RSB_RECOVER_GRANT); |
731 | } | 743 | } |
732 | 744 | ||
733 | void dlm_recover_rsbs(struct dlm_ls *ls) | 745 | void dlm_recover_rsbs(struct dlm_ls *ls) |
734 | { | 746 | { |
735 | struct dlm_rsb *r; | 747 | struct dlm_rsb *r; |
736 | int count = 0; | 748 | unsigned int count = 0; |
737 | |||
738 | log_debug(ls, "dlm_recover_rsbs"); | ||
739 | 749 | ||
740 | down_read(&ls->ls_root_sem); | 750 | down_read(&ls->ls_root_sem); |
741 | list_for_each_entry(r, &ls->ls_root_list, res_root_list) { | 751 | list_for_each_entry(r, &ls->ls_root_list, res_root_list) { |
@@ -744,7 +754,7 @@ void dlm_recover_rsbs(struct dlm_ls *ls) | |||
744 | if (rsb_flag(r, RSB_RECOVER_CONVERT)) | 754 | if (rsb_flag(r, RSB_RECOVER_CONVERT)) |
745 | recover_conversion(r); | 755 | recover_conversion(r); |
746 | if (rsb_flag(r, RSB_NEW_MASTER2)) | 756 | if (rsb_flag(r, RSB_NEW_MASTER2)) |
747 | set_locks_purged(r); | 757 | recover_grant(r); |
748 | recover_lvb(r); | 758 | recover_lvb(r); |
749 | count++; | 759 | count++; |
750 | } | 760 | } |
@@ -754,7 +764,8 @@ void dlm_recover_rsbs(struct dlm_ls *ls) | |||
754 | } | 764 | } |
755 | up_read(&ls->ls_root_sem); | 765 | up_read(&ls->ls_root_sem); |
756 | 766 | ||
757 | log_debug(ls, "dlm_recover_rsbs %d rsbs", count); | 767 | if (count) |
768 | log_debug(ls, "dlm_recover_rsbs %d done", count); | ||
758 | } | 769 | } |
759 | 770 | ||
760 | /* Create a single list of all root rsb's to be used during recovery */ | 771 | /* Create a single list of all root rsb's to be used during recovery */ |
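
Two smaller recover.c changes are easy to miss: set_lock_master() now clears lkb_remid whenever it reassigns lkb_nodeid, and set_new_master() no longer takes the rsb lock itself, so recover_master(), recover_master_static() and dlm_recover_master_reply() each wrap the call in lock_rsb()/unlock_rsb(). Moving the locking out to the callers is what lets recover_master_static() purge the MSTCPY locks and install the new master inside a single critical section. A generic sketch of that caller-holds-the-lock refactor (all names invented; the int flag only marks where the real lock calls would sit):

#include <stdio.h>

#define FLAG_NEW_MASTER 0x1

struct resource {
    int locked;    /* stands in for lock_rsb()/unlock_rsb() */
    int master;
    int flags;
};

/* callers must already hold the resource lock; the helper no longer takes it */
static void set_new_master_locked(struct resource *r, int nodeid)
{
    r->master = nodeid;
    r->flags |= FLAG_NEW_MASTER;
}

static void purge_mstcpy_locked(struct resource *r)
{
    (void)r;    /* drop master-copy lock state; also requires the lock */
}

static void recover_static(struct resource *r, int dir_nodeid)
{
    r->locked = 1;                        /* lock_rsb(r) */
    purge_mstcpy_locked(r);               /* both steps in one critical section */
    set_new_master_locked(r, dir_nodeid);
    r->locked = 0;                        /* unlock_rsb(r) */
}

int main(void)
{
    struct resource r = { 0, 0, 0 };

    recover_static(&r, 3);
    printf("master %d flags %x\n", r.master, r.flags);
    return 0;
}
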
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index 3780caf7ae0c..f1a9073c0835 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c | |||
@@ -54,7 +54,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) | |||
54 | unsigned long start; | 54 | unsigned long start; |
55 | int error, neg = 0; | 55 | int error, neg = 0; |
56 | 56 | ||
57 | log_debug(ls, "dlm_recover %llx", (unsigned long long)rv->seq); | 57 | log_debug(ls, "dlm_recover %llu", (unsigned long long)rv->seq); |
58 | 58 | ||
59 | mutex_lock(&ls->ls_recoverd_active); | 59 | mutex_lock(&ls->ls_recoverd_active); |
60 | 60 | ||
@@ -84,6 +84,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) | |||
84 | goto fail; | 84 | goto fail; |
85 | } | 85 | } |
86 | 86 | ||
87 | ls->ls_recover_locks_in = 0; | ||
88 | |||
87 | dlm_set_recover_status(ls, DLM_RS_NODES); | 89 | dlm_set_recover_status(ls, DLM_RS_NODES); |
88 | 90 | ||
89 | error = dlm_recover_members_wait(ls); | 91 | error = dlm_recover_members_wait(ls); |
@@ -130,7 +132,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) | |||
130 | * Clear lkb's for departed nodes. | 132 | * Clear lkb's for departed nodes. |
131 | */ | 133 | */ |
132 | 134 | ||
133 | dlm_purge_locks(ls); | 135 | dlm_recover_purge(ls); |
134 | 136 | ||
135 | /* | 137 | /* |
136 | * Get new master nodeid's for rsb's that were mastered on | 138 | * Get new master nodeid's for rsb's that were mastered on |
@@ -161,6 +163,9 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) | |||
161 | goto fail; | 163 | goto fail; |
162 | } | 164 | } |
163 | 165 | ||
166 | log_debug(ls, "dlm_recover_locks %u in", | ||
167 | ls->ls_recover_locks_in); | ||
168 | |||
164 | /* | 169 | /* |
165 | * Finalize state in master rsb's now that all locks can be | 170 | * Finalize state in master rsb's now that all locks can be |
166 | * checked. This includes conversion resolution and lvb | 171 | * checked. This includes conversion resolution and lvb |
@@ -225,9 +230,9 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) | |||
225 | goto fail; | 230 | goto fail; |
226 | } | 231 | } |
227 | 232 | ||
228 | dlm_grant_after_purge(ls); | 233 | dlm_recover_grant(ls); |
229 | 234 | ||
230 | log_debug(ls, "dlm_recover %llx generation %u done: %u ms", | 235 | log_debug(ls, "dlm_recover %llu generation %u done: %u ms", |
231 | (unsigned long long)rv->seq, ls->ls_generation, | 236 | (unsigned long long)rv->seq, ls->ls_generation, |
232 | jiffies_to_msecs(jiffies - start)); | 237 | jiffies_to_msecs(jiffies - start)); |
233 | mutex_unlock(&ls->ls_recoverd_active); | 238 | mutex_unlock(&ls->ls_recoverd_active); |
@@ -237,7 +242,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) | |||
237 | 242 | ||
238 | fail: | 243 | fail: |
239 | dlm_release_root_list(ls); | 244 | dlm_release_root_list(ls); |
240 | log_debug(ls, "dlm_recover %llx error %d", | 245 | log_debug(ls, "dlm_recover %llu error %d", |
241 | (unsigned long long)rv->seq, error); | 246 | (unsigned long long)rv->seq, error); |
242 | mutex_unlock(&ls->ls_recoverd_active); | 247 | mutex_unlock(&ls->ls_recoverd_active); |
243 | return error; | 248 | return error; |
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c index a44fa22890e1..1695f1b0dd45 100644 --- a/fs/dlm/requestqueue.c +++ b/fs/dlm/requestqueue.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | struct rq_entry { | 20 | struct rq_entry { |
21 | struct list_head list; | 21 | struct list_head list; |
22 | uint32_t recover_seq; | ||
22 | int nodeid; | 23 | int nodeid; |
23 | struct dlm_message request; | 24 | struct dlm_message request; |
24 | }; | 25 | }; |
@@ -41,6 +42,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms) | |||
41 | return; | 42 | return; |
42 | } | 43 | } |
43 | 44 | ||
45 | e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF; | ||
44 | e->nodeid = nodeid; | 46 | e->nodeid = nodeid; |
45 | memcpy(&e->request, ms, ms->m_header.h_length); | 47 | memcpy(&e->request, ms, ms->m_header.h_length); |
46 | 48 | ||
@@ -63,6 +65,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms) | |||
63 | int dlm_process_requestqueue(struct dlm_ls *ls) | 65 | int dlm_process_requestqueue(struct dlm_ls *ls) |
64 | { | 66 | { |
65 | struct rq_entry *e; | 67 | struct rq_entry *e; |
68 | struct dlm_message *ms; | ||
66 | int error = 0; | 69 | int error = 0; |
67 | 70 | ||
68 | mutex_lock(&ls->ls_requestqueue_mutex); | 71 | mutex_lock(&ls->ls_requestqueue_mutex); |
@@ -76,7 +79,15 @@ int dlm_process_requestqueue(struct dlm_ls *ls) | |||
76 | e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); | 79 | e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); |
77 | mutex_unlock(&ls->ls_requestqueue_mutex); | 80 | mutex_unlock(&ls->ls_requestqueue_mutex); |
78 | 81 | ||
79 | dlm_receive_message_saved(ls, &e->request); | 82 | ms = &e->request; |
83 | |||
84 | log_limit(ls, "dlm_process_requestqueue msg %d from %d " | ||
85 | "lkid %x remid %x result %d seq %u", | ||
86 | ms->m_type, ms->m_header.h_nodeid, | ||
87 | ms->m_lkid, ms->m_remid, ms->m_result, | ||
88 | e->recover_seq); | ||
89 | |||
90 | dlm_receive_message_saved(ls, &e->request, e->recover_seq); | ||
80 | 91 | ||
81 | mutex_lock(&ls->ls_requestqueue_mutex); | 92 | mutex_lock(&ls->ls_requestqueue_mutex); |
82 | list_del(&e->list); | 93 | list_del(&e->list); |
@@ -138,35 +149,7 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) | |||
138 | if (!dlm_no_directory(ls)) | 149 | if (!dlm_no_directory(ls)) |
139 | return 0; | 150 | return 0; |
140 | 151 | ||
141 | /* with no directory, the master is likely to change as a part of | 152 | return 1; |
142 | recovery; requests to/from the defunct master need to be purged */ | ||
143 | |||
144 | switch (type) { | ||
145 | case DLM_MSG_REQUEST: | ||
146 | case DLM_MSG_CONVERT: | ||
147 | case DLM_MSG_UNLOCK: | ||
148 | case DLM_MSG_CANCEL: | ||
149 | /* we're no longer the master of this resource, the sender | ||
150 | will resend to the new master (see waiter_needs_recovery) */ | ||
151 | |||
152 | if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid()) | ||
153 | return 1; | ||
154 | break; | ||
155 | |||
156 | case DLM_MSG_REQUEST_REPLY: | ||
157 | case DLM_MSG_CONVERT_REPLY: | ||
158 | case DLM_MSG_UNLOCK_REPLY: | ||
159 | case DLM_MSG_CANCEL_REPLY: | ||
160 | case DLM_MSG_GRANT: | ||
161 | /* this reply is from the former master of the resource, | ||
162 | we'll resend to the new master if needed */ | ||
163 | |||
164 | if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid) | ||
165 | return 1; | ||
166 | break; | ||
167 | } | ||
168 | |||
169 | return 0; | ||
170 | } | 153 | } |
171 | 154 | ||
172 | void dlm_purge_requestqueue(struct dlm_ls *ls) | 155 | void dlm_purge_requestqueue(struct dlm_ls *ls) |
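
The requestqueue change records the recovery sequence number in each saved entry at the moment the message is queued, and hands that value back as saved_seq when dlm_receive_message_saved() replays it, so the logging in _receive_message() can say which recovery generation a stale message came from. A stand-alone sketch of the capture-then-replay idea (struct and function names are simplified stand-ins):

#include <stdint.h>
#include <stdio.h>

struct message { int type; };

struct rq_entry {
    uint32_t recover_seq;   /* recovery generation when the msg was saved */
    struct message request;
};

static uint32_t ls_recover_seq = 41;

/* save an incoming message while locking is stopped */
static void add_requestqueue(struct rq_entry *e, const struct message *ms)
{
    e->recover_seq = ls_recover_seq & 0xFFFFFFFF;
    e->request = *ms;
}

/* later, replay it and tell the handler which recovery it was saved under */
static void receive_message_saved(const struct message *ms, uint32_t saved_seq)
{
    printf("replay msg %d saved_seq %u (current %u)\n",
           ms->type, saved_seq, ls_recover_seq);
}

int main(void)
{
    struct rq_entry e;
    struct message ms = { .type = 7 };

    add_requestqueue(&e, &ms);
    ls_recover_seq++;               /* a new recovery has since run */
    receive_message_saved(&e.request, e.recover_seq);
    return 0;
}
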
@@ -1245,6 +1245,13 @@ static int check_unsafe_exec(struct linux_binprm *bprm) | |||
1245 | bprm->unsafe |= LSM_UNSAFE_PTRACE; | 1245 | bprm->unsafe |= LSM_UNSAFE_PTRACE; |
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | /* | ||
1249 | * This isn't strictly necessary, but it makes it harder for LSMs to | ||
1250 | * mess up. | ||
1251 | */ | ||
1252 | if (current->no_new_privs) | ||
1253 | bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS; | ||
1254 | |||
1248 | n_fs = 1; | 1255 | n_fs = 1; |
1249 | spin_lock(&p->fs->lock); | 1256 | spin_lock(&p->fs->lock); |
1250 | rcu_read_lock(); | 1257 | rcu_read_lock(); |
@@ -1288,7 +1295,8 @@ int prepare_binprm(struct linux_binprm *bprm) | |||
1288 | bprm->cred->euid = current_euid(); | 1295 | bprm->cred->euid = current_euid(); |
1289 | bprm->cred->egid = current_egid(); | 1296 | bprm->cred->egid = current_egid(); |
1290 | 1297 | ||
1291 | if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) { | 1298 | if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) && |
1299 | !current->no_new_privs) { | ||
1292 | /* Set-uid? */ | 1300 | /* Set-uid? */ |
1293 | if (mode & S_ISUID) { | 1301 | if (mode & S_ISUID) { |
1294 | bprm->per_clear |= PER_CLEAR_ON_SETID; | 1302 | bprm->per_clear |= PER_CLEAR_ON_SETID; |
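
The two hunks above (check_unsafe_exec() and prepare_binprm(), both in fs/exec.c) wire the new no_new_privs task flag into exec: LSMs are warned via LSM_UNSAFE_NO_NEW_PRIVS, and set-uid/set-gid handling is skipped entirely when the flag is set. From userspace the flag is requested with prctl(); a minimal example follows, assuming a kernel with this feature and adding a fallback define for older headers.

#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif

int main(void)
{
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("prctl");
        return 1;
    }
    /* From here on, execve() will not grant privilege elevation: a set-uid
       binary such as /usr/bin/passwd would run with the caller's uid rather
       than the usual euid 0. */
    printf("no_new_privs set for this process and its children\n");
    execlp("id", "id", "-u", (char *)NULL);
    perror("execlp");
    return 1;
}
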
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index dffb86536285..f663a67d7bf0 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
@@ -79,7 +79,7 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str | |||
79 | 79 | ||
80 | struct dentry *ext2_get_parent(struct dentry *child) | 80 | struct dentry *ext2_get_parent(struct dentry *child) |
81 | { | 81 | { |
82 | struct qstr dotdot = {.name = "..", .len = 2}; | 82 | struct qstr dotdot = QSTR_INIT("..", 2); |
83 | unsigned long ino = ext2_inode_by_name(child->d_inode, &dotdot); | 83 | unsigned long ino = ext2_inode_by_name(child->d_inode, &dotdot); |
84 | if (!ino) | 84 | if (!ino) |
85 | return ERR_PTR(-ENOENT); | 85 | return ERR_PTR(-ENOENT); |
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index d7940b24cf68..eeb63dfc5d20 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c | |||
@@ -1045,7 +1045,7 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str | |||
1045 | struct dentry *ext3_get_parent(struct dentry *child) | 1045 | struct dentry *ext3_get_parent(struct dentry *child) |
1046 | { | 1046 | { |
1047 | unsigned long ino; | 1047 | unsigned long ino; |
1048 | struct qstr dotdot = {.name = "..", .len = 2}; | 1048 | struct qstr dotdot = QSTR_INIT("..", 2); |
1049 | struct ext3_dir_entry_2 * de; | 1049 | struct ext3_dir_entry_2 * de; |
1050 | struct buffer_head *bh; | 1050 | struct buffer_head *bh; |
1051 | 1051 | ||
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 349d7b3671c8..e2a3f4b0ff78 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -1052,10 +1052,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru | |||
1052 | struct dentry *ext4_get_parent(struct dentry *child) | 1052 | struct dentry *ext4_get_parent(struct dentry *child) |
1053 | { | 1053 | { |
1054 | __u32 ino; | 1054 | __u32 ino; |
1055 | static const struct qstr dotdot = { | 1055 | static const struct qstr dotdot = QSTR_INIT("..", 2); |
1056 | .name = "..", | ||
1057 | .len = 2, | ||
1058 | }; | ||
1059 | struct ext4_dir_entry_2 * de; | 1056 | struct ext4_dir_entry_2 * de; |
1060 | struct buffer_head *bh; | 1057 | struct buffer_head *bh; |
1061 | 1058 | ||
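The ext2/ext3/ext4 hunks above make the same mechanical change: open-coded struct qstr initializers for ".." become QSTR_INIT("..", 2). A standalone illustration with simplified stand-in definitions (the real struct qstr and macro live in include/linux/dcache.h and may differ in detail):

```c
/* Simplified stand-ins for illustration only -- not the kernel definitions. */
struct qstr {
	unsigned int hash;
	unsigned int len;
	const char *name;
};
#define QSTR_INIT(n, l) { .name = (n), .len = (l) }

/* Callers now state only the name and length... */
static const struct qstr dotdot = QSTR_INIT("..", 2);

/* ...instead of spelling out the struct layout themselves, so a later
 * change to struct qstr (e.g. packing hash and len together) only needs
 * to touch the macro, not every filesystem. */
static const struct qstr dotdot_open_coded = { .name = "..", .len = 2 };
```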
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 230eb0f005b6..bd4a5892c93c 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c | |||
@@ -73,12 +73,8 @@ static int gfs2_set_mode(struct inode *inode, umode_t mode) | |||
73 | int error = 0; | 73 | int error = 0; |
74 | 74 | ||
75 | if (mode != inode->i_mode) { | 75 | if (mode != inode->i_mode) { |
76 | struct iattr iattr; | 76 | inode->i_mode = mode; |
77 | 77 | mark_inode_dirty(inode); | |
78 | iattr.ia_valid = ATTR_MODE; | ||
79 | iattr.ia_mode = mode; | ||
80 | |||
81 | error = gfs2_setattr_simple(inode, &iattr); | ||
82 | } | 78 | } |
83 | 79 | ||
84 | return error; | 80 | return error; |
@@ -126,9 +122,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode) | |||
126 | return PTR_ERR(acl); | 122 | return PTR_ERR(acl); |
127 | if (!acl) { | 123 | if (!acl) { |
128 | mode &= ~current_umask(); | 124 | mode &= ~current_umask(); |
129 | if (mode != inode->i_mode) | 125 | return gfs2_set_mode(inode, mode); |
130 | error = gfs2_set_mode(inode, mode); | ||
131 | return error; | ||
132 | } | 126 | } |
133 | 127 | ||
134 | if (S_ISDIR(inode->i_mode)) { | 128 | if (S_ISDIR(inode->i_mode)) { |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 9b2ff0e851b1..e80a464850c8 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
@@ -36,8 +36,8 @@ | |||
36 | #include "glops.h" | 36 | #include "glops.h" |
37 | 37 | ||
38 | 38 | ||
39 | void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, | 39 | static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, |
40 | unsigned int from, unsigned int to) | 40 | unsigned int from, unsigned int to) |
41 | { | 41 | { |
42 | struct buffer_head *head = page_buffers(page); | 42 | struct buffer_head *head = page_buffers(page); |
43 | unsigned int bsize = head->b_size; | 43 | unsigned int bsize = head->b_size; |
@@ -517,15 +517,14 @@ out: | |||
517 | /** | 517 | /** |
518 | * gfs2_internal_read - read an internal file | 518 | * gfs2_internal_read - read an internal file |
519 | * @ip: The gfs2 inode | 519 | * @ip: The gfs2 inode |
520 | * @ra_state: The readahead state (or NULL for no readahead) | ||
521 | * @buf: The buffer to fill | 520 | * @buf: The buffer to fill |
522 | * @pos: The file position | 521 | * @pos: The file position |
523 | * @size: The amount to read | 522 | * @size: The amount to read |
524 | * | 523 | * |
525 | */ | 524 | */ |
526 | 525 | ||
527 | int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state, | 526 | int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, |
528 | char *buf, loff_t *pos, unsigned size) | 527 | unsigned size) |
529 | { | 528 | { |
530 | struct address_space *mapping = ip->i_inode.i_mapping; | 529 | struct address_space *mapping = ip->i_inode.i_mapping; |
531 | unsigned long index = *pos / PAGE_CACHE_SIZE; | 530 | unsigned long index = *pos / PAGE_CACHE_SIZE; |
@@ -943,8 +942,8 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) | |||
943 | clear_buffer_dirty(bh); | 942 | clear_buffer_dirty(bh); |
944 | bd = bh->b_private; | 943 | bd = bh->b_private; |
945 | if (bd) { | 944 | if (bd) { |
946 | if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh)) | 945 | if (!list_empty(&bd->bd_list) && !buffer_pinned(bh)) |
947 | list_del_init(&bd->bd_le.le_list); | 946 | list_del_init(&bd->bd_list); |
948 | else | 947 | else |
949 | gfs2_remove_from_journal(bh, current->journal_info, 0); | 948 | gfs2_remove_from_journal(bh, current->journal_info, 0); |
950 | } | 949 | } |
@@ -1084,10 +1083,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) | |||
1084 | bd = bh->b_private; | 1083 | bd = bh->b_private; |
1085 | if (bd) { | 1084 | if (bd) { |
1086 | gfs2_assert_warn(sdp, bd->bd_bh == bh); | 1085 | gfs2_assert_warn(sdp, bd->bd_bh == bh); |
1087 | gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr)); | 1086 | if (!list_empty(&bd->bd_list)) { |
1088 | if (!list_empty(&bd->bd_le.le_list)) { | ||
1089 | if (!buffer_pinned(bh)) | 1087 | if (!buffer_pinned(bh)) |
1090 | list_del_init(&bd->bd_le.le_list); | 1088 | list_del_init(&bd->bd_list); |
1091 | else | 1089 | else |
1092 | bd = NULL; | 1090 | bd = NULL; |
1093 | } | 1091 | } |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 03c04febe26f..dab54099dd98 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
@@ -324,7 +324,7 @@ static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp) | |||
324 | if (!dblock) | 324 | if (!dblock) |
325 | return x + 1; | 325 | return x + 1; |
326 | 326 | ||
327 | ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, 0, &mp->mp_bh[x+1]); | 327 | ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]); |
328 | if (ret) | 328 | if (ret) |
329 | return ret; | 329 | return ret; |
330 | } | 330 | } |
@@ -882,7 +882,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
882 | top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; | 882 | top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; |
883 | bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; | 883 | bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; |
884 | } else { | 884 | } else { |
885 | error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh); | 885 | error = gfs2_meta_indirect_buffer(ip, height, block, &bh); |
886 | if (error) | 886 | if (error) |
887 | return error; | 887 | return error; |
888 | 888 | ||
@@ -1169,6 +1169,7 @@ static int do_grow(struct inode *inode, u64 size) | |||
1169 | struct buffer_head *dibh; | 1169 | struct buffer_head *dibh; |
1170 | struct gfs2_qadata *qa = NULL; | 1170 | struct gfs2_qadata *qa = NULL; |
1171 | int error; | 1171 | int error; |
1172 | int unstuff = 0; | ||
1172 | 1173 | ||
1173 | if (gfs2_is_stuffed(ip) && | 1174 | if (gfs2_is_stuffed(ip) && |
1174 | (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) { | 1175 | (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) { |
@@ -1183,13 +1184,14 @@ static int do_grow(struct inode *inode, u64 size) | |||
1183 | error = gfs2_inplace_reserve(ip, 1); | 1184 | error = gfs2_inplace_reserve(ip, 1); |
1184 | if (error) | 1185 | if (error) |
1185 | goto do_grow_qunlock; | 1186 | goto do_grow_qunlock; |
1187 | unstuff = 1; | ||
1186 | } | 1188 | } |
1187 | 1189 | ||
1188 | error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0); | 1190 | error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0); |
1189 | if (error) | 1191 | if (error) |
1190 | goto do_grow_release; | 1192 | goto do_grow_release; |
1191 | 1193 | ||
1192 | if (qa) { | 1194 | if (unstuff) { |
1193 | error = gfs2_unstuff_dinode(ip, NULL); | 1195 | error = gfs2_unstuff_dinode(ip, NULL); |
1194 | if (error) | 1196 | if (error) |
1195 | goto do_end_trans; | 1197 | goto do_end_trans; |
@@ -1208,7 +1210,7 @@ static int do_grow(struct inode *inode, u64 size) | |||
1208 | do_end_trans: | 1210 | do_end_trans: |
1209 | gfs2_trans_end(sdp); | 1211 | gfs2_trans_end(sdp); |
1210 | do_grow_release: | 1212 | do_grow_release: |
1211 | if (qa) { | 1213 | if (unstuff) { |
1212 | gfs2_inplace_release(ip); | 1214 | gfs2_inplace_release(ip); |
1213 | do_grow_qunlock: | 1215 | do_grow_qunlock: |
1214 | gfs2_quota_unlock(ip); | 1216 | gfs2_quota_unlock(ip); |
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index a836056343f0..8aaeb07a07b5 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c | |||
@@ -821,7 +821,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, | |||
821 | struct buffer_head *bh; | 821 | struct buffer_head *bh; |
822 | struct gfs2_leaf *leaf; | 822 | struct gfs2_leaf *leaf; |
823 | struct gfs2_dirent *dent; | 823 | struct gfs2_dirent *dent; |
824 | struct qstr name = { .name = "", .len = 0, .hash = 0 }; | 824 | struct qstr name = { .name = "" }; |
825 | 825 | ||
826 | error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); | 826 | error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); |
827 | if (error) | 827 | if (error) |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index a3d2c9ee8d66..31b199f6efc1 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c | |||
@@ -558,14 +558,14 @@ fail: | |||
558 | } | 558 | } |
559 | 559 | ||
560 | /** | 560 | /** |
561 | * gfs2_close - called to close a struct file | 561 | * gfs2_release - called to close a struct file |
562 | * @inode: the inode the struct file belongs to | 562 | * @inode: the inode the struct file belongs to |
563 | * @file: the struct file being closed | 563 | * @file: the struct file being closed |
564 | * | 564 | * |
565 | * Returns: errno | 565 | * Returns: errno |
566 | */ | 566 | */ |
567 | 567 | ||
568 | static int gfs2_close(struct inode *inode, struct file *file) | 568 | static int gfs2_release(struct inode *inode, struct file *file) |
569 | { | 569 | { |
570 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; | 570 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; |
571 | struct gfs2_file *fp; | 571 | struct gfs2_file *fp; |
@@ -1005,7 +1005,7 @@ const struct file_operations gfs2_file_fops = { | |||
1005 | .unlocked_ioctl = gfs2_ioctl, | 1005 | .unlocked_ioctl = gfs2_ioctl, |
1006 | .mmap = gfs2_mmap, | 1006 | .mmap = gfs2_mmap, |
1007 | .open = gfs2_open, | 1007 | .open = gfs2_open, |
1008 | .release = gfs2_close, | 1008 | .release = gfs2_release, |
1009 | .fsync = gfs2_fsync, | 1009 | .fsync = gfs2_fsync, |
1010 | .lock = gfs2_lock, | 1010 | .lock = gfs2_lock, |
1011 | .flock = gfs2_flock, | 1011 | .flock = gfs2_flock, |
@@ -1019,7 +1019,7 @@ const struct file_operations gfs2_dir_fops = { | |||
1019 | .readdir = gfs2_readdir, | 1019 | .readdir = gfs2_readdir, |
1020 | .unlocked_ioctl = gfs2_ioctl, | 1020 | .unlocked_ioctl = gfs2_ioctl, |
1021 | .open = gfs2_open, | 1021 | .open = gfs2_open, |
1022 | .release = gfs2_close, | 1022 | .release = gfs2_release, |
1023 | .fsync = gfs2_fsync, | 1023 | .fsync = gfs2_fsync, |
1024 | .lock = gfs2_lock, | 1024 | .lock = gfs2_lock, |
1025 | .flock = gfs2_flock, | 1025 | .flock = gfs2_flock, |
@@ -1037,7 +1037,7 @@ const struct file_operations gfs2_file_fops_nolock = { | |||
1037 | .unlocked_ioctl = gfs2_ioctl, | 1037 | .unlocked_ioctl = gfs2_ioctl, |
1038 | .mmap = gfs2_mmap, | 1038 | .mmap = gfs2_mmap, |
1039 | .open = gfs2_open, | 1039 | .open = gfs2_open, |
1040 | .release = gfs2_close, | 1040 | .release = gfs2_release, |
1041 | .fsync = gfs2_fsync, | 1041 | .fsync = gfs2_fsync, |
1042 | .splice_read = generic_file_splice_read, | 1042 | .splice_read = generic_file_splice_read, |
1043 | .splice_write = generic_file_splice_write, | 1043 | .splice_write = generic_file_splice_write, |
@@ -1049,7 +1049,7 @@ const struct file_operations gfs2_dir_fops_nolock = { | |||
1049 | .readdir = gfs2_readdir, | 1049 | .readdir = gfs2_readdir, |
1050 | .unlocked_ioctl = gfs2_ioctl, | 1050 | .unlocked_ioctl = gfs2_ioctl, |
1051 | .open = gfs2_open, | 1051 | .open = gfs2_open, |
1052 | .release = gfs2_close, | 1052 | .release = gfs2_release, |
1053 | .fsync = gfs2_fsync, | 1053 | .fsync = gfs2_fsync, |
1054 | .llseek = default_llseek, | 1054 | .llseek = default_llseek, |
1055 | }; | 1055 | }; |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 1656df7aacd2..4bdcf3784187 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -94,7 +94,6 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) | |||
94 | /* A shortened, inline version of gfs2_trans_begin() */ | 94 | /* A shortened, inline version of gfs2_trans_begin() */ |
95 | tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64)); | 95 | tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64)); |
96 | tr.tr_ip = (unsigned long)__builtin_return_address(0); | 96 | tr.tr_ip = (unsigned long)__builtin_return_address(0); |
97 | INIT_LIST_HEAD(&tr.tr_list_buf); | ||
98 | gfs2_log_reserve(sdp, tr.tr_reserved); | 97 | gfs2_log_reserve(sdp, tr.tr_reserved); |
99 | BUG_ON(current->journal_info); | 98 | BUG_ON(current->journal_info); |
100 | current->journal_info = &tr; | 99 | current->journal_info = &tr; |
@@ -379,11 +378,6 @@ int gfs2_inode_refresh(struct gfs2_inode *ip) | |||
379 | if (error) | 378 | if (error) |
380 | return error; | 379 | return error; |
381 | 380 | ||
382 | if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) { | ||
383 | brelse(dibh); | ||
384 | return -EIO; | ||
385 | } | ||
386 | |||
387 | error = gfs2_dinode_in(ip, dibh->b_data); | 381 | error = gfs2_dinode_in(ip, dibh->b_data); |
388 | brelse(dibh); | 382 | brelse(dibh); |
389 | clear_bit(GIF_INVALID, &ip->i_flags); | 383 | clear_bit(GIF_INVALID, &ip->i_flags); |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 47d0bda5ac2b..67fd6beffece 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -26,7 +26,7 @@ | |||
26 | #define DIO_METADATA 0x00000020 | 26 | #define DIO_METADATA 0x00000020 |
27 | 27 | ||
28 | struct gfs2_log_operations; | 28 | struct gfs2_log_operations; |
29 | struct gfs2_log_element; | 29 | struct gfs2_bufdata; |
30 | struct gfs2_holder; | 30 | struct gfs2_holder; |
31 | struct gfs2_glock; | 31 | struct gfs2_glock; |
32 | struct gfs2_quota_data; | 32 | struct gfs2_quota_data; |
@@ -52,7 +52,7 @@ struct gfs2_log_header_host { | |||
52 | */ | 52 | */ |
53 | 53 | ||
54 | struct gfs2_log_operations { | 54 | struct gfs2_log_operations { |
55 | void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le); | 55 | void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); |
56 | void (*lo_before_commit) (struct gfs2_sbd *sdp); | 56 | void (*lo_before_commit) (struct gfs2_sbd *sdp); |
57 | void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai); | 57 | void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai); |
58 | void (*lo_before_scan) (struct gfs2_jdesc *jd, | 58 | void (*lo_before_scan) (struct gfs2_jdesc *jd, |
@@ -64,11 +64,6 @@ struct gfs2_log_operations { | |||
64 | const char *lo_name; | 64 | const char *lo_name; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct gfs2_log_element { | ||
68 | struct list_head le_list; | ||
69 | const struct gfs2_log_operations *le_ops; | ||
70 | }; | ||
71 | |||
72 | #define GBF_FULL 1 | 67 | #define GBF_FULL 1 |
73 | 68 | ||
74 | struct gfs2_bitmap { | 69 | struct gfs2_bitmap { |
@@ -118,15 +113,10 @@ TAS_BUFFER_FNS(Zeronew, zeronew) | |||
118 | struct gfs2_bufdata { | 113 | struct gfs2_bufdata { |
119 | struct buffer_head *bd_bh; | 114 | struct buffer_head *bd_bh; |
120 | struct gfs2_glock *bd_gl; | 115 | struct gfs2_glock *bd_gl; |
116 | u64 bd_blkno; | ||
121 | 117 | ||
122 | union { | 118 | struct list_head bd_list; |
123 | struct list_head list_tr; | 119 | const struct gfs2_log_operations *bd_ops; |
124 | u64 blkno; | ||
125 | } u; | ||
126 | #define bd_list_tr u.list_tr | ||
127 | #define bd_blkno u.blkno | ||
128 | |||
129 | struct gfs2_log_element bd_le; | ||
130 | 120 | ||
131 | struct gfs2_ail *bd_ail; | 121 | struct gfs2_ail *bd_ail; |
132 | struct list_head bd_ail_st_list; | 122 | struct list_head bd_ail_st_list; |
@@ -411,13 +401,10 @@ struct gfs2_trans { | |||
411 | 401 | ||
412 | int tr_touched; | 402 | int tr_touched; |
413 | 403 | ||
414 | unsigned int tr_num_buf; | ||
415 | unsigned int tr_num_buf_new; | 404 | unsigned int tr_num_buf_new; |
416 | unsigned int tr_num_databuf_new; | 405 | unsigned int tr_num_databuf_new; |
417 | unsigned int tr_num_buf_rm; | 406 | unsigned int tr_num_buf_rm; |
418 | unsigned int tr_num_databuf_rm; | 407 | unsigned int tr_num_databuf_rm; |
419 | struct list_head tr_list_buf; | ||
420 | |||
421 | unsigned int tr_num_revoke; | 408 | unsigned int tr_num_revoke; |
422 | unsigned int tr_num_revoke_rm; | 409 | unsigned int tr_num_revoke_rm; |
423 | }; | 410 | }; |
@@ -556,7 +543,6 @@ struct gfs2_sb_host { | |||
556 | struct lm_lockstruct { | 543 | struct lm_lockstruct { |
557 | int ls_jid; | 544 | int ls_jid; |
558 | unsigned int ls_first; | 545 | unsigned int ls_first; |
559 | unsigned int ls_nodir; | ||
560 | const struct lm_lockops *ls_ops; | 546 | const struct lm_lockops *ls_ops; |
561 | dlm_lockspace_t *ls_dlm; | 547 | dlm_lockspace_t *ls_dlm; |
562 | 548 | ||
@@ -699,7 +685,6 @@ struct gfs2_sbd { | |||
699 | 685 | ||
700 | struct list_head sd_log_le_buf; | 686 | struct list_head sd_log_le_buf; |
701 | struct list_head sd_log_le_revoke; | 687 | struct list_head sd_log_le_revoke; |
702 | struct list_head sd_log_le_rg; | ||
703 | struct list_head sd_log_le_databuf; | 688 | struct list_head sd_log_le_databuf; |
704 | struct list_head sd_log_le_ordered; | 689 | struct list_head sd_log_le_ordered; |
705 | 690 | ||
@@ -716,7 +701,9 @@ struct gfs2_sbd { | |||
716 | 701 | ||
717 | struct rw_semaphore sd_log_flush_lock; | 702 | struct rw_semaphore sd_log_flush_lock; |
718 | atomic_t sd_log_in_flight; | 703 | atomic_t sd_log_in_flight; |
704 | struct bio *sd_log_bio; | ||
719 | wait_queue_head_t sd_log_flush_wait; | 705 | wait_queue_head_t sd_log_flush_wait; |
706 | int sd_log_error; | ||
720 | 707 | ||
721 | unsigned int sd_log_flush_head; | 708 | unsigned int sd_log_flush_head; |
722 | u64 sd_log_flush_wrapped; | 709 | u64 sd_log_flush_wrapped; |
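The incore.h hunks above collapse the per-buffer log bookkeeping: the gfs2_log_element wrapper and the union of a per-transaction list head with a revoke block number are removed, leaving struct gfs2_bufdata with a single list head, a plain block number, and a direct pointer to its log operations. A rough before/after sketch limited to the fields touched here (the omitted glock/ail fields are unchanged):

```c
/* Before (see the removed lines above): a union plus a wrapper struct. */
struct gfs2_bufdata_before {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	union {
		struct list_head list_tr;	/* per-transaction buffer list */
		u64 blkno;			/* block number, for revokes   */
	} u;
	struct gfs2_log_element bd_le;		/* le_list + le_ops            */
	/* ... ail fields ... */
};

/* After (see the added lines above): one list head, one ops pointer. */
struct gfs2_bufdata_after {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;
	struct list_head bd_list;
	const struct gfs2_log_operations *bd_ops;
	/* ... ail fields ... */
};
```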
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 276e7b52b658..c53c7477f6da 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h | |||
@@ -17,10 +17,7 @@ | |||
17 | 17 | ||
18 | extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask); | 18 | extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask); |
19 | extern int gfs2_internal_read(struct gfs2_inode *ip, | 19 | extern int gfs2_internal_read(struct gfs2_inode *ip, |
20 | struct file_ra_state *ra_state, | ||
21 | char *buf, loff_t *pos, unsigned size); | 20 | char *buf, loff_t *pos, unsigned size); |
22 | extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, | ||
23 | unsigned int from, unsigned int to); | ||
24 | extern void gfs2_set_aops(struct inode *inode); | 21 | extern void gfs2_set_aops(struct inode *inode); |
25 | 22 | ||
26 | static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) | 23 | static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) |
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 5f5e70e047dc..4a38db739ca0 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c | |||
@@ -1209,8 +1209,6 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table) | |||
1209 | fsname++; | 1209 | fsname++; |
1210 | 1210 | ||
1211 | flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL; | 1211 | flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL; |
1212 | if (ls->ls_nodir) | ||
1213 | flags |= DLM_LSFL_NODIR; | ||
1214 | 1212 | ||
1215 | /* | 1213 | /* |
1216 | * create/join lockspace | 1214 | * create/join lockspace |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 4752eadc7f6e..f4beeb9c81c1 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -32,8 +32,6 @@ | |||
32 | #include "dir.h" | 32 | #include "dir.h" |
33 | #include "trace_gfs2.h" | 33 | #include "trace_gfs2.h" |
34 | 34 | ||
35 | #define PULL 1 | ||
36 | |||
37 | /** | 35 | /** |
38 | * gfs2_struct2blk - compute the number of log blocks needed to hold a set of structures | 36 | * gfs2_struct2blk - compute the number of log blocks needed to hold a set of structures |
39 | * @sdp: the filesystem | 37 | * @sdp: the filesystem |
@@ -359,18 +357,6 @@ retry: | |||
359 | return 0; | 357 | return 0; |
360 | } | 358 | } |
361 | 359 | ||
362 | u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn) | ||
363 | { | ||
364 | struct gfs2_journal_extent *je; | ||
365 | |||
366 | list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) { | ||
367 | if (lbn >= je->lblock && lbn < je->lblock + je->blocks) | ||
368 | return je->dblock + lbn - je->lblock; | ||
369 | } | ||
370 | |||
371 | return -1; | ||
372 | } | ||
373 | |||
374 | /** | 360 | /** |
375 | * log_distance - Compute distance between two journal blocks | 361 | * log_distance - Compute distance between two journal blocks |
376 | * @sdp: The GFS2 superblock | 362 | * @sdp: The GFS2 superblock |
@@ -466,17 +452,6 @@ static unsigned int current_tail(struct gfs2_sbd *sdp) | |||
466 | return tail; | 452 | return tail; |
467 | } | 453 | } |
468 | 454 | ||
469 | void gfs2_log_incr_head(struct gfs2_sbd *sdp) | ||
470 | { | ||
471 | BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) && | ||
472 | (sdp->sd_log_flush_head != sdp->sd_log_head)); | ||
473 | |||
474 | if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) { | ||
475 | sdp->sd_log_flush_head = 0; | ||
476 | sdp->sd_log_flush_wrapped = 1; | ||
477 | } | ||
478 | } | ||
479 | |||
480 | static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) | 455 | static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) |
481 | { | 456 | { |
482 | unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail); | 457 | unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail); |
@@ -511,8 +486,8 @@ static int bd_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
511 | { | 486 | { |
512 | struct gfs2_bufdata *bda, *bdb; | 487 | struct gfs2_bufdata *bda, *bdb; |
513 | 488 | ||
514 | bda = list_entry(a, struct gfs2_bufdata, bd_le.le_list); | 489 | bda = list_entry(a, struct gfs2_bufdata, bd_list); |
515 | bdb = list_entry(b, struct gfs2_bufdata, bd_le.le_list); | 490 | bdb = list_entry(b, struct gfs2_bufdata, bd_list); |
516 | 491 | ||
517 | if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr) | 492 | if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr) |
518 | return -1; | 493 | return -1; |
@@ -530,8 +505,8 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp) | |||
530 | gfs2_log_lock(sdp); | 505 | gfs2_log_lock(sdp); |
531 | list_sort(NULL, &sdp->sd_log_le_ordered, &bd_cmp); | 506 | list_sort(NULL, &sdp->sd_log_le_ordered, &bd_cmp); |
532 | while (!list_empty(&sdp->sd_log_le_ordered)) { | 507 | while (!list_empty(&sdp->sd_log_le_ordered)) { |
533 | bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list); | 508 | bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_list); |
534 | list_move(&bd->bd_le.le_list, &written); | 509 | list_move(&bd->bd_list, &written); |
535 | bh = bd->bd_bh; | 510 | bh = bd->bd_bh; |
536 | if (!buffer_dirty(bh)) | 511 | if (!buffer_dirty(bh)) |
537 | continue; | 512 | continue; |
@@ -558,7 +533,7 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp) | |||
558 | 533 | ||
559 | gfs2_log_lock(sdp); | 534 | gfs2_log_lock(sdp); |
560 | while (!list_empty(&sdp->sd_log_le_ordered)) { | 535 | while (!list_empty(&sdp->sd_log_le_ordered)) { |
561 | bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list); | 536 | bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_list); |
562 | bh = bd->bd_bh; | 537 | bh = bd->bd_bh; |
563 | if (buffer_locked(bh)) { | 538 | if (buffer_locked(bh)) { |
564 | get_bh(bh); | 539 | get_bh(bh); |
@@ -568,7 +543,7 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp) | |||
568 | gfs2_log_lock(sdp); | 543 | gfs2_log_lock(sdp); |
569 | continue; | 544 | continue; |
570 | } | 545 | } |
571 | list_del_init(&bd->bd_le.le_list); | 546 | list_del_init(&bd->bd_list); |
572 | } | 547 | } |
573 | gfs2_log_unlock(sdp); | 548 | gfs2_log_unlock(sdp); |
574 | } | 549 | } |
@@ -580,25 +555,19 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp) | |||
580 | * Returns: the initialized log buffer descriptor | 555 | * Returns: the initialized log buffer descriptor |
581 | */ | 556 | */ |
582 | 557 | ||
583 | static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) | 558 | static void log_write_header(struct gfs2_sbd *sdp, u32 flags) |
584 | { | 559 | { |
585 | u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); | ||
586 | struct buffer_head *bh; | ||
587 | struct gfs2_log_header *lh; | 560 | struct gfs2_log_header *lh; |
588 | unsigned int tail; | 561 | unsigned int tail; |
589 | u32 hash; | 562 | u32 hash; |
590 | 563 | int rw = WRITE_FLUSH_FUA | REQ_META; | |
591 | bh = sb_getblk(sdp->sd_vfs, blkno); | 564 | struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); |
592 | lock_buffer(bh); | 565 | lh = page_address(page); |
593 | memset(bh->b_data, 0, bh->b_size); | 566 | clear_page(lh); |
594 | set_buffer_uptodate(bh); | ||
595 | clear_buffer_dirty(bh); | ||
596 | 567 | ||
597 | gfs2_ail1_empty(sdp); | 568 | gfs2_ail1_empty(sdp); |
598 | tail = current_tail(sdp); | 569 | tail = current_tail(sdp); |
599 | 570 | ||
600 | lh = (struct gfs2_log_header *)bh->b_data; | ||
601 | memset(lh, 0, sizeof(struct gfs2_log_header)); | ||
602 | lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); | 571 | lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); |
603 | lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); | 572 | lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); |
604 | lh->lh_header.__pad0 = cpu_to_be64(0); | 573 | lh->lh_header.__pad0 = cpu_to_be64(0); |
@@ -608,31 +577,22 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) | |||
608 | lh->lh_flags = cpu_to_be32(flags); | 577 | lh->lh_flags = cpu_to_be32(flags); |
609 | lh->lh_tail = cpu_to_be32(tail); | 578 | lh->lh_tail = cpu_to_be32(tail); |
610 | lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head); | 579 | lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head); |
611 | hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header)); | 580 | hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header)); |
612 | lh->lh_hash = cpu_to_be32(hash); | 581 | lh->lh_hash = cpu_to_be32(hash); |
613 | 582 | ||
614 | bh->b_end_io = end_buffer_write_sync; | ||
615 | get_bh(bh); | ||
616 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) { | 583 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) { |
617 | gfs2_ordered_wait(sdp); | 584 | gfs2_ordered_wait(sdp); |
618 | log_flush_wait(sdp); | 585 | log_flush_wait(sdp); |
619 | submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh); | 586 | rw = WRITE_SYNC | REQ_META | REQ_PRIO; |
620 | } else { | ||
621 | submit_bh(WRITE_FLUSH_FUA | REQ_META, bh); | ||
622 | } | 587 | } |
623 | wait_on_buffer(bh); | ||
624 | 588 | ||
625 | if (!buffer_uptodate(bh)) | 589 | sdp->sd_log_idle = (tail == sdp->sd_log_flush_head); |
626 | gfs2_io_error_bh(sdp, bh); | 590 | gfs2_log_write_page(sdp, page); |
627 | brelse(bh); | 591 | gfs2_log_flush_bio(sdp, rw); |
592 | log_flush_wait(sdp); | ||
628 | 593 | ||
629 | if (sdp->sd_log_tail != tail) | 594 | if (sdp->sd_log_tail != tail) |
630 | log_pull_tail(sdp, tail); | 595 | log_pull_tail(sdp, tail); |
631 | else | ||
632 | gfs2_assert_withdraw(sdp, !pull); | ||
633 | |||
634 | sdp->sd_log_idle = (tail == sdp->sd_log_flush_head); | ||
635 | gfs2_log_incr_head(sdp); | ||
636 | } | 596 | } |
637 | 597 | ||
638 | /** | 598 | /** |
@@ -678,15 +638,14 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl) | |||
678 | 638 | ||
679 | gfs2_ordered_write(sdp); | 639 | gfs2_ordered_write(sdp); |
680 | lops_before_commit(sdp); | 640 | lops_before_commit(sdp); |
641 | gfs2_log_flush_bio(sdp, WRITE); | ||
681 | 642 | ||
682 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { | 643 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
683 | log_write_header(sdp, 0, 0); | 644 | log_write_header(sdp, 0); |
684 | } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ | 645 | } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ |
685 | gfs2_log_lock(sdp); | ||
686 | atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ | 646 | atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ |
687 | trace_gfs2_log_blocks(sdp, -1); | 647 | trace_gfs2_log_blocks(sdp, -1); |
688 | gfs2_log_unlock(sdp); | 648 | log_write_header(sdp, 0); |
689 | log_write_header(sdp, 0, PULL); | ||
690 | } | 649 | } |
691 | lops_after_commit(sdp, ai); | 650 | lops_after_commit(sdp, ai); |
692 | 651 | ||
@@ -735,21 +694,6 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |||
735 | gfs2_log_unlock(sdp); | 694 | gfs2_log_unlock(sdp); |
736 | } | 695 | } |
737 | 696 | ||
738 | static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | ||
739 | { | ||
740 | struct list_head *head = &tr->tr_list_buf; | ||
741 | struct gfs2_bufdata *bd; | ||
742 | |||
743 | gfs2_log_lock(sdp); | ||
744 | while (!list_empty(head)) { | ||
745 | bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr); | ||
746 | list_del_init(&bd->bd_list_tr); | ||
747 | tr->tr_num_buf--; | ||
748 | } | ||
749 | gfs2_log_unlock(sdp); | ||
750 | gfs2_assert_warn(sdp, !tr->tr_num_buf); | ||
751 | } | ||
752 | |||
753 | /** | 697 | /** |
754 | * gfs2_log_commit - Commit a transaction to the log | 698 | * gfs2_log_commit - Commit a transaction to the log |
755 | * @sdp: the filesystem | 699 | * @sdp: the filesystem |
@@ -768,8 +712,6 @@ static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |||
768 | void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | 712 | void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) |
769 | { | 713 | { |
770 | log_refund(sdp, tr); | 714 | log_refund(sdp, tr); |
771 | buf_lo_incore_commit(sdp, tr); | ||
772 | |||
773 | up_read(&sdp->sd_log_flush_lock); | 715 | up_read(&sdp->sd_log_flush_lock); |
774 | 716 | ||
775 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || | 717 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || |
@@ -798,8 +740,7 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp) | |||
798 | sdp->sd_log_flush_head = sdp->sd_log_head; | 740 | sdp->sd_log_flush_head = sdp->sd_log_head; |
799 | sdp->sd_log_flush_wrapped = 0; | 741 | sdp->sd_log_flush_wrapped = 0; |
800 | 742 | ||
801 | log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, | 743 | log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT); |
802 | (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL); | ||
803 | 744 | ||
804 | gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks); | 745 | gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks); |
805 | gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail); | 746 | gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail); |
@@ -854,11 +795,9 @@ int gfs2_logd(void *data) | |||
854 | struct gfs2_sbd *sdp = data; | 795 | struct gfs2_sbd *sdp = data; |
855 | unsigned long t = 1; | 796 | unsigned long t = 1; |
856 | DEFINE_WAIT(wait); | 797 | DEFINE_WAIT(wait); |
857 | unsigned preflush; | ||
858 | 798 | ||
859 | while (!kthread_should_stop()) { | 799 | while (!kthread_should_stop()) { |
860 | 800 | ||
861 | preflush = atomic_read(&sdp->sd_log_pinned); | ||
862 | if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { | 801 | if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { |
863 | gfs2_ail1_empty(sdp); | 802 | gfs2_ail1_empty(sdp); |
864 | gfs2_log_flush(sdp, NULL); | 803 | gfs2_log_flush(sdp, NULL); |
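Condensing the reworked log_write_header() above: the header is now built in a page from gfs2_page_pool, queued with gfs2_log_write_page(), and submitted through gfs2_log_flush_bio() with flags chosen by the barrier setting. A trimmed sketch of that decision only (the header fields, hashing and tail handling from the hunk are omitted):

```c
/* Illustrative fragment, not the full function. */
int rw = WRITE_FLUSH_FUA | REQ_META;		/* default: flush + FUA barrier */

if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
	gfs2_ordered_wait(sdp);			/* no barriers: drain ordered   */
	log_flush_wait(sdp);			/* data and in-flight log I/O,  */
	rw = WRITE_SYNC | REQ_META | REQ_PRIO;	/* then use a plain sync write  */
}

gfs2_log_write_page(sdp, page);			/* queue the header page        */
gfs2_log_flush_bio(sdp, rw);			/* submit with the chosen flags */
log_flush_wait(sdp);				/* wait for it to reach disk    */
```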
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index ff07454b582c..3fd5215ea25f 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h | |||
@@ -52,8 +52,6 @@ extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, | |||
52 | unsigned int ssize); | 52 | unsigned int ssize); |
53 | 53 | ||
54 | extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); | 54 | extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); |
55 | extern void gfs2_log_incr_head(struct gfs2_sbd *sdp); | ||
56 | extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn); | ||
57 | extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl); | 55 | extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl); |
58 | extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); | 56 | extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); |
59 | extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd); | 57 | extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 6b1efb594d90..852c1be1dd3b 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -127,146 +127,277 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, | |||
127 | atomic_dec(&sdp->sd_log_pinned); | 127 | atomic_dec(&sdp->sd_log_pinned); |
128 | } | 128 | } |
129 | 129 | ||
130 | 130 | static void gfs2_log_incr_head(struct gfs2_sbd *sdp) | |
131 | static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh) | ||
132 | { | 131 | { |
133 | return (struct gfs2_log_descriptor *)bh->b_data; | 132 | BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) && |
133 | (sdp->sd_log_flush_head != sdp->sd_log_head)); | ||
134 | |||
135 | if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) { | ||
136 | sdp->sd_log_flush_head = 0; | ||
137 | sdp->sd_log_flush_wrapped = 1; | ||
138 | } | ||
134 | } | 139 | } |
135 | 140 | ||
136 | static inline __be64 *bh_log_ptr(struct buffer_head *bh) | 141 | static u64 gfs2_log_bmap(struct gfs2_sbd *sdp) |
137 | { | 142 | { |
138 | struct gfs2_log_descriptor *ld = bh_log_desc(bh); | 143 | unsigned int lbn = sdp->sd_log_flush_head; |
139 | return (__force __be64 *)(ld + 1); | 144 | struct gfs2_journal_extent *je; |
145 | u64 block; | ||
146 | |||
147 | list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) { | ||
148 | if (lbn >= je->lblock && lbn < je->lblock + je->blocks) { | ||
149 | block = je->dblock + lbn - je->lblock; | ||
150 | gfs2_log_incr_head(sdp); | ||
151 | return block; | ||
152 | } | ||
153 | } | ||
154 | |||
155 | return -1; | ||
140 | } | 156 | } |
141 | 157 | ||
142 | static inline __be64 *bh_ptr_end(struct buffer_head *bh) | 158 | /** |
159 | * gfs2_end_log_write_bh - end log write of pagecache data with buffers | ||
160 | * @sdp: The superblock | ||
161 | * @bvec: The bio_vec | ||
162 | * @error: The i/o status | ||
163 | * | ||
164 | * This finds the relevant buffers and unlocks them and sets the | ||
165 | * error flag according to the status of the i/o request. This is | ||
166 | * used when the log is writing data which has an in-place version | ||
167 | * that is pinned in the pagecache. | ||
168 | */ | ||
169 | |||
170 | static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, | ||
171 | int error) | ||
143 | { | 172 | { |
144 | return (__force __be64 *)(bh->b_data + bh->b_size); | 173 | struct buffer_head *bh, *next; |
174 | struct page *page = bvec->bv_page; | ||
175 | unsigned size; | ||
176 | |||
177 | bh = page_buffers(page); | ||
178 | size = bvec->bv_len; | ||
179 | while (bh_offset(bh) < bvec->bv_offset) | ||
180 | bh = bh->b_this_page; | ||
181 | do { | ||
182 | if (error) | ||
183 | set_buffer_write_io_error(bh); | ||
184 | unlock_buffer(bh); | ||
185 | next = bh->b_this_page; | ||
186 | size -= bh->b_size; | ||
187 | brelse(bh); | ||
188 | bh = next; | ||
189 | } while(bh && size); | ||
145 | } | 190 | } |
146 | 191 | ||
147 | /** | 192 | /** |
148 | * gfs2_log_write_endio - End of I/O for a log buffer | 193 | * gfs2_end_log_write - end of i/o to the log |
149 | * @bh: The buffer head | 194 | * @bio: The bio |
150 | * @uptodate: I/O Status | 195 | * @error: Status of i/o request |
196 | * | ||
197 | * Each bio_vec contains either data from the pagecache or data | ||
198 | * relating to the log itself. Here we iterate over the bio_vec | ||
199 | * array, processing both kinds of data. | ||
151 | * | 200 | * |
152 | */ | 201 | */ |
153 | 202 | ||
154 | static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate) | 203 | static void gfs2_end_log_write(struct bio *bio, int error) |
155 | { | 204 | { |
156 | struct gfs2_sbd *sdp = bh->b_private; | 205 | struct gfs2_sbd *sdp = bio->bi_private; |
157 | bh->b_private = NULL; | 206 | struct bio_vec *bvec; |
207 | struct page *page; | ||
208 | int i; | ||
158 | 209 | ||
159 | end_buffer_write_sync(bh, uptodate); | 210 | if (error) { |
211 | sdp->sd_log_error = error; | ||
212 | fs_err(sdp, "Error %d writing to log\n", error); | ||
213 | } | ||
214 | |||
215 | bio_for_each_segment(bvec, bio, i) { | ||
216 | page = bvec->bv_page; | ||
217 | if (page_has_buffers(page)) | ||
218 | gfs2_end_log_write_bh(sdp, bvec, error); | ||
219 | else | ||
220 | mempool_free(page, gfs2_page_pool); | ||
221 | } | ||
222 | |||
223 | bio_put(bio); | ||
160 | if (atomic_dec_and_test(&sdp->sd_log_in_flight)) | 224 | if (atomic_dec_and_test(&sdp->sd_log_in_flight)) |
161 | wake_up(&sdp->sd_log_flush_wait); | 225 | wake_up(&sdp->sd_log_flush_wait); |
162 | } | 226 | } |
163 | 227 | ||
164 | /** | 228 | /** |
165 | * gfs2_log_get_buf - Get and initialize a buffer to use for log control data | 229 | * gfs2_log_flush_bio - Submit any pending log bio |
166 | * @sdp: The GFS2 superblock | 230 | * @sdp: The superblock |
231 | * @rw: The rw flags | ||
167 | * | 232 | * |
168 | * Returns: the buffer_head | 233 | * Submit any pending part-built or full bio to the block device. If |
234 | * there is no pending bio, then this is a no-op. | ||
169 | */ | 235 | */ |
170 | 236 | ||
171 | static struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp) | 237 | void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw) |
172 | { | 238 | { |
173 | u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); | 239 | if (sdp->sd_log_bio) { |
174 | struct buffer_head *bh; | 240 | atomic_inc(&sdp->sd_log_in_flight); |
241 | submit_bio(rw, sdp->sd_log_bio); | ||
242 | sdp->sd_log_bio = NULL; | ||
243 | } | ||
244 | } | ||
175 | 245 | ||
176 | bh = sb_getblk(sdp->sd_vfs, blkno); | 246 | /** |
177 | lock_buffer(bh); | 247 | * gfs2_log_alloc_bio - Allocate a new bio for log writing |
178 | memset(bh->b_data, 0, bh->b_size); | 248 | * @sdp: The superblock |
179 | set_buffer_uptodate(bh); | 249 | * @blkno: The next device block number we want to write to |
180 | clear_buffer_dirty(bh); | 250 | * |
181 | gfs2_log_incr_head(sdp); | 251 | * This should never be called when there is a cached bio in the |
182 | atomic_inc(&sdp->sd_log_in_flight); | 252 | * super block. When it returns, there will be a cached bio in the |
183 | bh->b_private = sdp; | 253 | * super block which will have as many bio_vecs as the device is |
184 | bh->b_end_io = gfs2_log_write_endio; | 254 | * happy to handle. |
255 | * | ||
256 | * Returns: Newly allocated bio | ||
257 | */ | ||
185 | 258 | ||
186 | return bh; | 259 | static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) |
260 | { | ||
261 | struct super_block *sb = sdp->sd_vfs; | ||
262 | unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev); | ||
263 | struct bio *bio; | ||
264 | |||
265 | BUG_ON(sdp->sd_log_bio); | ||
266 | |||
267 | while (1) { | ||
268 | bio = bio_alloc(GFP_NOIO, nrvecs); | ||
269 | if (likely(bio)) | ||
270 | break; | ||
271 | nrvecs = max(nrvecs/2, 1U); | ||
272 | } | ||
273 | |||
274 | bio->bi_sector = blkno * (sb->s_blocksize >> 9); | ||
275 | bio->bi_bdev = sb->s_bdev; | ||
276 | bio->bi_end_io = gfs2_end_log_write; | ||
277 | bio->bi_private = sdp; | ||
278 | |||
279 | sdp->sd_log_bio = bio; | ||
280 | |||
281 | return bio; | ||
187 | } | 282 | } |
188 | 283 | ||
189 | /** | 284 | /** |
190 | * gfs2_fake_write_endio - | 285 | * gfs2_log_get_bio - Get cached log bio, or allocate a new one |
191 | * @bh: The buffer head | 286 | * @sdp: The superblock |
192 | * @uptodate: The I/O Status | 287 | * @blkno: The device block number we want to write to |
288 | * | ||
289 | * If there is a cached bio, then if the next block number is sequential | ||
290 | * with the previous one, return it, otherwise flush the bio to the | ||
291 | * device. If there is not a cached bio, or we just flushed it, then | ||
292 | * allocate a new one. | ||
193 | * | 293 | * |
294 | * Returns: The bio to use for log writes | ||
194 | */ | 295 | */ |
195 | 296 | ||
196 | static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate) | 297 | static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno) |
197 | { | 298 | { |
198 | struct buffer_head *real_bh = bh->b_private; | 299 | struct bio *bio = sdp->sd_log_bio; |
199 | struct gfs2_bufdata *bd = real_bh->b_private; | 300 | u64 nblk; |
200 | struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd; | 301 | |
302 | if (bio) { | ||
303 | nblk = bio->bi_sector + bio_sectors(bio); | ||
304 | nblk >>= sdp->sd_fsb2bb_shift; | ||
305 | if (blkno == nblk) | ||
306 | return bio; | ||
307 | gfs2_log_flush_bio(sdp, WRITE); | ||
308 | } | ||
201 | 309 | ||
202 | end_buffer_write_sync(bh, uptodate); | 310 | return gfs2_log_alloc_bio(sdp, blkno); |
203 | mempool_free(bh, gfs2_bh_pool); | ||
204 | unlock_buffer(real_bh); | ||
205 | brelse(real_bh); | ||
206 | if (atomic_dec_and_test(&sdp->sd_log_in_flight)) | ||
207 | wake_up(&sdp->sd_log_flush_wait); | ||
208 | } | 311 | } |
209 | 312 | ||
313 | |||
210 | /** | 314 | /** |
211 | * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log | 315 | * gfs2_log_write - write to log |
212 | * @sdp: the filesystem | 316 | * @sdp: the filesystem |
213 | * @data: the data the buffer_head should point to | 317 | * @page: the page to write |
318 | * @size: the size of the data to write | ||
319 | * @offset: the offset within the page | ||
214 | * | 320 | * |
215 | * Returns: the log buffer descriptor | 321 | * Try and add the page segment to the current bio. If that fails, |
322 | * submit the current bio to the device and create a new one, and | ||
323 | * then add the page segment to that. | ||
216 | */ | 324 | */ |
217 | 325 | ||
218 | static struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp, | 326 | static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, |
219 | struct buffer_head *real) | 327 | unsigned size, unsigned offset) |
220 | { | 328 | { |
221 | u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head); | 329 | u64 blkno = gfs2_log_bmap(sdp); |
222 | struct buffer_head *bh; | 330 | struct bio *bio; |
331 | int ret; | ||
332 | |||
333 | bio = gfs2_log_get_bio(sdp, blkno); | ||
334 | ret = bio_add_page(bio, page, size, offset); | ||
335 | if (ret == 0) { | ||
336 | gfs2_log_flush_bio(sdp, WRITE); | ||
337 | bio = gfs2_log_alloc_bio(sdp, blkno); | ||
338 | ret = bio_add_page(bio, page, size, offset); | ||
339 | WARN_ON(ret == 0); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * gfs2_log_write_bh - write a buffer's content to the log | ||
345 | * @sdp: The super block | ||
346 | * @bh: The buffer pointing to the in-place location | ||
347 | * | ||
348 | * This writes the content of the buffer to the next available location | ||
349 | * in the log. The buffer will be unlocked once the i/o to the log has | ||
350 | * completed. | ||
351 | */ | ||
223 | 352 | ||
224 | bh = mempool_alloc(gfs2_bh_pool, GFP_NOFS); | 353 | static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh) |
225 | atomic_set(&bh->b_count, 1); | 354 | { |
226 | bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock); | 355 | gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh)); |
227 | set_bh_page(bh, real->b_page, bh_offset(real)); | 356 | } |
228 | bh->b_blocknr = blkno; | ||
229 | bh->b_size = sdp->sd_sb.sb_bsize; | ||
230 | bh->b_bdev = sdp->sd_vfs->s_bdev; | ||
231 | bh->b_private = real; | ||
232 | bh->b_end_io = gfs2_fake_write_endio; | ||
233 | 357 | ||
234 | gfs2_log_incr_head(sdp); | 358 | /** |
235 | atomic_inc(&sdp->sd_log_in_flight); | 359 | * gfs2_log_write_page - write one block stored in a page, into the log |
360 | * @sdp: The superblock | ||
361 | * @page: The struct page | ||
362 | * | ||
363 | * This writes the first block-sized part of the page into the log. Note | ||
364 | * that the page must have been allocated from the gfs2_page_pool mempool | ||
365 | * and that after this has been called, ownership has been transferred and | ||
366 | * the page may be freed at any time. | ||
367 | */ | ||
236 | 368 | ||
237 | return bh; | 369 | void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) |
370 | { | ||
371 | struct super_block *sb = sdp->sd_vfs; | ||
372 | gfs2_log_write(sdp, page, sb->s_blocksize, 0); | ||
238 | } | 373 | } |
239 | 374 | ||
240 | static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type) | 375 | static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, |
376 | u32 ld_length, u32 ld_data1) | ||
241 | { | 377 | { |
242 | struct buffer_head *bh = gfs2_log_get_buf(sdp); | 378 | struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); |
243 | struct gfs2_log_descriptor *ld = bh_log_desc(bh); | 379 | struct gfs2_log_descriptor *ld = page_address(page); |
380 | clear_page(ld); | ||
244 | ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC); | 381 | ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC); |
245 | ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD); | 382 | ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD); |
246 | ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD); | 383 | ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD); |
247 | ld->ld_type = cpu_to_be32(ld_type); | 384 | ld->ld_type = cpu_to_be32(ld_type); |
248 | ld->ld_length = 0; | 385 | ld->ld_length = cpu_to_be32(ld_length); |
249 | ld->ld_data1 = 0; | 386 | ld->ld_data1 = cpu_to_be32(ld_data1); |
250 | ld->ld_data2 = 0; | 387 | ld->ld_data2 = 0; |
251 | memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved)); | 388 | return page; |
252 | return bh; | ||
253 | } | 389 | } |
254 | 390 | ||
255 | static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | 391 | static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) |
256 | { | 392 | { |
257 | struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); | ||
258 | struct gfs2_meta_header *mh; | 393 | struct gfs2_meta_header *mh; |
259 | struct gfs2_trans *tr; | 394 | struct gfs2_trans *tr; |
260 | 395 | ||
261 | lock_buffer(bd->bd_bh); | 396 | lock_buffer(bd->bd_bh); |
262 | gfs2_log_lock(sdp); | 397 | gfs2_log_lock(sdp); |
263 | if (!list_empty(&bd->bd_list_tr)) | ||
264 | goto out; | ||
265 | tr = current->journal_info; | 398 | tr = current->journal_info; |
266 | tr->tr_touched = 1; | 399 | tr->tr_touched = 1; |
267 | tr->tr_num_buf++; | 400 | if (!list_empty(&bd->bd_list)) |
268 | list_add(&bd->bd_list_tr, &tr->tr_list_buf); | ||
269 | if (!list_empty(&le->le_list)) | ||
270 | goto out; | 401 | goto out; |
271 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); | 402 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); |
272 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); | 403 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); |
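The heart of the hunk above is the switch from one buffer_head per log block to batched bios: gfs2_log_bmap() hands out the next journal block (advancing the flush head), gfs2_log_get_bio() keeps appending to the cached bio while blocks stay contiguous, and a full or non-contiguous bio is submitted before a fresh one is allocated. The write path in miniature (illustrative; it assumes the helpers defined in the hunk and leaves out the locking the real code relies on):

```c
/* Condensed from gfs2_log_write() and gfs2_log_get_bio(); the real code
 * keeps the current bio cached in sdp->sd_log_bio. */
static void log_write_sketch(struct gfs2_sbd *sdp, struct page *page,
			     unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);		/* next log block, head++ */
	struct bio *bio = sdp->sd_log_bio;

	if (bio) {
		u64 next = (bio->bi_sector + bio_sectors(bio)) >> sdp->sd_fsb2bb_shift;
		if (blkno != next) {		/* gap: submit what we have */
			gfs2_log_flush_bio(sdp, WRITE);
			bio = NULL;
		}
	}
	if (!bio)
		bio = gfs2_log_alloc_bio(sdp, blkno);

	if (bio_add_page(bio, page, size, offset) == 0) {
		/* bio already full: submit it and retry on a fresh one */
		gfs2_log_flush_bio(sdp, WRITE);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		bio_add_page(bio, page, size, offset);
	}
}
```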
@@ -276,62 +407,86 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | |||
276 | mh->__pad0 = cpu_to_be64(0); | 407 | mh->__pad0 = cpu_to_be64(0); |
277 | mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); | 408 | mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); |
278 | sdp->sd_log_num_buf++; | 409 | sdp->sd_log_num_buf++; |
279 | list_add(&le->le_list, &sdp->sd_log_le_buf); | 410 | list_add(&bd->bd_list, &sdp->sd_log_le_buf); |
280 | tr->tr_num_buf_new++; | 411 | tr->tr_num_buf_new++; |
281 | out: | 412 | out: |
282 | gfs2_log_unlock(sdp); | 413 | gfs2_log_unlock(sdp); |
283 | unlock_buffer(bd->bd_bh); | 414 | unlock_buffer(bd->bd_bh); |
284 | } | 415 | } |
285 | 416 | ||
286 | static void buf_lo_before_commit(struct gfs2_sbd *sdp) | 417 | static void gfs2_check_magic(struct buffer_head *bh) |
418 | { | ||
419 | void *kaddr; | ||
420 | __be32 *ptr; | ||
421 | |||
422 | clear_buffer_escaped(bh); | ||
423 | kaddr = kmap_atomic(bh->b_page); | ||
424 | ptr = kaddr + bh_offset(bh); | ||
425 | if (*ptr == cpu_to_be32(GFS2_MAGIC)) | ||
426 | set_buffer_escaped(bh); | ||
427 | kunmap_atomic(kaddr); | ||
428 | } | ||
429 | |||
430 | static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, | ||
431 | unsigned int total, struct list_head *blist, | ||
432 | bool is_databuf) | ||
287 | { | 433 | { |
288 | struct buffer_head *bh; | ||
289 | struct gfs2_log_descriptor *ld; | 434 | struct gfs2_log_descriptor *ld; |
290 | struct gfs2_bufdata *bd1 = NULL, *bd2; | 435 | struct gfs2_bufdata *bd1 = NULL, *bd2; |
291 | unsigned int total; | 436 | struct page *page; |
292 | unsigned int limit; | ||
293 | unsigned int num; | 437 | unsigned int num; |
294 | unsigned n; | 438 | unsigned n; |
295 | __be64 *ptr; | 439 | __be64 *ptr; |
296 | 440 | ||
297 | limit = buf_limit(sdp); | ||
298 | /* for 4k blocks, limit = 503 */ | ||
299 | |||
300 | gfs2_log_lock(sdp); | 441 | gfs2_log_lock(sdp); |
301 | total = sdp->sd_log_num_buf; | 442 | bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list); |
302 | bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list); | ||
303 | while(total) { | 443 | while(total) { |
304 | num = total; | 444 | num = total; |
305 | if (total > limit) | 445 | if (total > limit) |
306 | num = limit; | 446 | num = limit; |
307 | gfs2_log_unlock(sdp); | 447 | gfs2_log_unlock(sdp); |
308 | bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA); | 448 | page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num); |
449 | ld = page_address(page); | ||
309 | gfs2_log_lock(sdp); | 450 | gfs2_log_lock(sdp); |
310 | ld = bh_log_desc(bh); | 451 | ptr = (__be64 *)(ld + 1); |
311 | ptr = bh_log_ptr(bh); | ||
312 | ld->ld_length = cpu_to_be32(num + 1); | ||
313 | ld->ld_data1 = cpu_to_be32(num); | ||
314 | 452 | ||
315 | n = 0; | 453 | n = 0; |
316 | list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf, | 454 | list_for_each_entry_continue(bd1, blist, bd_list) { |
317 | bd_le.le_list) { | ||
318 | *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); | 455 | *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); |
456 | if (is_databuf) { | ||
457 | gfs2_check_magic(bd1->bd_bh); | ||
458 | *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0); | ||
459 | } | ||
319 | if (++n >= num) | 460 | if (++n >= num) |
320 | break; | 461 | break; |
321 | } | 462 | } |
322 | 463 | ||
323 | gfs2_log_unlock(sdp); | 464 | gfs2_log_unlock(sdp); |
324 | submit_bh(WRITE_SYNC, bh); | 465 | gfs2_log_write_page(sdp, page); |
325 | gfs2_log_lock(sdp); | 466 | gfs2_log_lock(sdp); |
326 | 467 | ||
327 | n = 0; | 468 | n = 0; |
328 | list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf, | 469 | list_for_each_entry_continue(bd2, blist, bd_list) { |
329 | bd_le.le_list) { | ||
330 | get_bh(bd2->bd_bh); | 470 | get_bh(bd2->bd_bh); |
331 | gfs2_log_unlock(sdp); | 471 | gfs2_log_unlock(sdp); |
332 | lock_buffer(bd2->bd_bh); | 472 | lock_buffer(bd2->bd_bh); |
333 | bh = gfs2_log_fake_buf(sdp, bd2->bd_bh); | 473 | |
334 | submit_bh(WRITE_SYNC, bh); | 474 | if (buffer_escaped(bd2->bd_bh)) { |
475 | void *kaddr; | ||
476 | page = mempool_alloc(gfs2_page_pool, GFP_NOIO); | ||
477 | ptr = page_address(page); | ||
478 | kaddr = kmap_atomic(bd2->bd_bh->b_page); | ||
479 | memcpy(ptr, kaddr + bh_offset(bd2->bd_bh), | ||
480 | bd2->bd_bh->b_size); | ||
481 | kunmap_atomic(kaddr); | ||
482 | *(__be32 *)ptr = 0; | ||
483 | clear_buffer_escaped(bd2->bd_bh); | ||
484 | unlock_buffer(bd2->bd_bh); | ||
485 | brelse(bd2->bd_bh); | ||
486 | gfs2_log_write_page(sdp, page); | ||
487 | } else { | ||
488 | gfs2_log_write_bh(sdp, bd2->bd_bh); | ||
489 | } | ||
335 | gfs2_log_lock(sdp); | 490 | gfs2_log_lock(sdp); |
336 | if (++n >= num) | 491 | if (++n >= num) |
337 | break; | 492 | break; |
@@ -343,14 +498,22 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp) | |||
343 | gfs2_log_unlock(sdp); | 498 | gfs2_log_unlock(sdp); |
344 | } | 499 | } |
345 | 500 | ||
501 | static void buf_lo_before_commit(struct gfs2_sbd *sdp) | ||
502 | { | ||
503 | unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */ | ||
504 | |||
505 | gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf, | ||
506 | &sdp->sd_log_le_buf, 0); | ||
507 | } | ||
508 | |||
346 | static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) | 509 | static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) |
347 | { | 510 | { |
348 | struct list_head *head = &sdp->sd_log_le_buf; | 511 | struct list_head *head = &sdp->sd_log_le_buf; |
349 | struct gfs2_bufdata *bd; | 512 | struct gfs2_bufdata *bd; |
350 | 513 | ||
351 | while (!list_empty(head)) { | 514 | while (!list_empty(head)) { |
352 | bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list); | 515 | bd = list_entry(head->next, struct gfs2_bufdata, bd_list); |
353 | list_del_init(&bd->bd_le.le_list); | 516 | list_del_init(&bd->bd_list); |
354 | sdp->sd_log_num_buf--; | 517 | sdp->sd_log_num_buf--; |
355 | 518 | ||
356 | gfs2_unpin(sdp, bd->bd_bh, ai); | 519 | gfs2_unpin(sdp, bd->bd_bh, ai); |
@@ -437,9 +600,8 @@ static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) | |||
437 | jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks); | 600 | jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks); |
438 | } | 601 | } |
439 | 602 | ||
440 | static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | 603 | static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) |
441 | { | 604 | { |
442 | struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); | ||
443 | struct gfs2_glock *gl = bd->bd_gl; | 605 | struct gfs2_glock *gl = bd->bd_gl; |
444 | struct gfs2_trans *tr; | 606 | struct gfs2_trans *tr; |
445 | 607 | ||
@@ -449,48 +611,48 @@ static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | |||
449 | sdp->sd_log_num_revoke++; | 611 | sdp->sd_log_num_revoke++; |
450 | atomic_inc(&gl->gl_revokes); | 612 | atomic_inc(&gl->gl_revokes); |
451 | set_bit(GLF_LFLUSH, &gl->gl_flags); | 613 | set_bit(GLF_LFLUSH, &gl->gl_flags); |
452 | list_add(&le->le_list, &sdp->sd_log_le_revoke); | 614 | list_add(&bd->bd_list, &sdp->sd_log_le_revoke); |
453 | } | 615 | } |
454 | 616 | ||
455 | static void revoke_lo_before_commit(struct gfs2_sbd *sdp) | 617 | static void revoke_lo_before_commit(struct gfs2_sbd *sdp) |
456 | { | 618 | { |
457 | struct gfs2_log_descriptor *ld; | 619 | struct gfs2_log_descriptor *ld; |
458 | struct gfs2_meta_header *mh; | 620 | struct gfs2_meta_header *mh; |
459 | struct buffer_head *bh; | ||
460 | unsigned int offset; | 621 | unsigned int offset; |
461 | struct list_head *head = &sdp->sd_log_le_revoke; | 622 | struct list_head *head = &sdp->sd_log_le_revoke; |
462 | struct gfs2_bufdata *bd; | 623 | struct gfs2_bufdata *bd; |
624 | struct page *page; | ||
625 | unsigned int length; | ||
463 | 626 | ||
464 | if (!sdp->sd_log_num_revoke) | 627 | if (!sdp->sd_log_num_revoke) |
465 | return; | 628 | return; |
466 | 629 | ||
467 | bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE); | 630 | length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64)); |
468 | ld = bh_log_desc(bh); | 631 | page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke); |
469 | ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, | 632 | ld = page_address(page); |
470 | sizeof(u64))); | ||
471 | ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke); | ||
472 | offset = sizeof(struct gfs2_log_descriptor); | 633 | offset = sizeof(struct gfs2_log_descriptor); |
473 | 634 | ||
474 | list_for_each_entry(bd, head, bd_le.le_list) { | 635 | list_for_each_entry(bd, head, bd_list) { |
475 | sdp->sd_log_num_revoke--; | 636 | sdp->sd_log_num_revoke--; |
476 | 637 | ||
477 | if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) { | 638 | if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) { |
478 | submit_bh(WRITE_SYNC, bh); | ||
479 | 639 | ||
480 | bh = gfs2_log_get_buf(sdp); | 640 | gfs2_log_write_page(sdp, page); |
481 | mh = (struct gfs2_meta_header *)bh->b_data; | 641 | page = mempool_alloc(gfs2_page_pool, GFP_NOIO); |
642 | mh = page_address(page); | ||
643 | clear_page(mh); | ||
482 | mh->mh_magic = cpu_to_be32(GFS2_MAGIC); | 644 | mh->mh_magic = cpu_to_be32(GFS2_MAGIC); |
483 | mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB); | 645 | mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB); |
484 | mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB); | 646 | mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB); |
485 | offset = sizeof(struct gfs2_meta_header); | 647 | offset = sizeof(struct gfs2_meta_header); |
486 | } | 648 | } |
487 | 649 | ||
488 | *(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno); | 650 | *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno); |
489 | offset += sizeof(u64); | 651 | offset += sizeof(u64); |
490 | } | 652 | } |
491 | gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); | 653 | gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); |
492 | 654 | ||
493 | submit_bh(WRITE_SYNC, bh); | 655 | gfs2_log_write_page(sdp, page); |
494 | } | 656 | } |
495 | 657 | ||
496 | static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) | 658 | static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) |
@@ -500,8 +662,8 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) | |||
500 | struct gfs2_glock *gl; | 662 | struct gfs2_glock *gl; |
501 | 663 | ||
502 | while (!list_empty(head)) { | 664 | while (!list_empty(head)) { |
503 | bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list); | 665 | bd = list_entry(head->next, struct gfs2_bufdata, bd_list); |
504 | list_del_init(&bd->bd_le.le_list); | 666 | list_del_init(&bd->bd_list); |
505 | gl = bd->bd_gl; | 667 | gl = bd->bd_gl; |
506 | atomic_dec(&gl->gl_revokes); | 668 | atomic_dec(&gl->gl_revokes); |
507 | clear_bit(GLF_LFLUSH, &gl->gl_flags); | 669 | clear_bit(GLF_LFLUSH, &gl->gl_flags); |
@@ -604,108 +766,33 @@ static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) | |||
604 | * blocks, which isn't an enormous overhead but twice as much as | 766 | * blocks, which isn't an enormous overhead but twice as much as |
605 | * for normal metadata blocks. | 767 | * for normal metadata blocks. |
606 | */ | 768 | */ |
607 | static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | 769 | static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) |
608 | { | 770 | { |
609 | struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); | ||
610 | struct gfs2_trans *tr = current->journal_info; | 771 | struct gfs2_trans *tr = current->journal_info; |
611 | struct address_space *mapping = bd->bd_bh->b_page->mapping; | 772 | struct address_space *mapping = bd->bd_bh->b_page->mapping; |
612 | struct gfs2_inode *ip = GFS2_I(mapping->host); | 773 | struct gfs2_inode *ip = GFS2_I(mapping->host); |
613 | 774 | ||
614 | lock_buffer(bd->bd_bh); | 775 | lock_buffer(bd->bd_bh); |
615 | gfs2_log_lock(sdp); | 776 | gfs2_log_lock(sdp); |
616 | if (tr) { | 777 | if (tr) |
617 | if (!list_empty(&bd->bd_list_tr)) | ||
618 | goto out; | ||
619 | tr->tr_touched = 1; | 778 | tr->tr_touched = 1; |
620 | if (gfs2_is_jdata(ip)) { | 779 | if (!list_empty(&bd->bd_list)) |
621 | tr->tr_num_buf++; | ||
622 | list_add(&bd->bd_list_tr, &tr->tr_list_buf); | ||
623 | } | ||
624 | } | ||
625 | if (!list_empty(&le->le_list)) | ||
626 | goto out; | 780 | goto out; |
627 | |||
628 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); | 781 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); |
629 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); | 782 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); |
630 | if (gfs2_is_jdata(ip)) { | 783 | if (gfs2_is_jdata(ip)) { |
631 | gfs2_pin(sdp, bd->bd_bh); | 784 | gfs2_pin(sdp, bd->bd_bh); |
632 | tr->tr_num_databuf_new++; | 785 | tr->tr_num_databuf_new++; |
633 | sdp->sd_log_num_databuf++; | 786 | sdp->sd_log_num_databuf++; |
634 | list_add_tail(&le->le_list, &sdp->sd_log_le_databuf); | 787 | list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf); |
635 | } else { | 788 | } else { |
636 | list_add_tail(&le->le_list, &sdp->sd_log_le_ordered); | 789 | list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered); |
637 | } | 790 | } |
638 | out: | 791 | out: |
639 | gfs2_log_unlock(sdp); | 792 | gfs2_log_unlock(sdp); |
640 | unlock_buffer(bd->bd_bh); | 793 | unlock_buffer(bd->bd_bh); |
641 | } | 794 | } |
642 | 795 | ||
643 | static void gfs2_check_magic(struct buffer_head *bh) | ||
644 | { | ||
645 | void *kaddr; | ||
646 | __be32 *ptr; | ||
647 | |||
648 | clear_buffer_escaped(bh); | ||
649 | kaddr = kmap_atomic(bh->b_page); | ||
650 | ptr = kaddr + bh_offset(bh); | ||
651 | if (*ptr == cpu_to_be32(GFS2_MAGIC)) | ||
652 | set_buffer_escaped(bh); | ||
653 | kunmap_atomic(kaddr); | ||
654 | } | ||
655 | |||
656 | static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh, | ||
657 | struct list_head *list, struct list_head *done, | ||
658 | unsigned int n) | ||
659 | { | ||
660 | struct buffer_head *bh1; | ||
661 | struct gfs2_log_descriptor *ld; | ||
662 | struct gfs2_bufdata *bd; | ||
663 | __be64 *ptr; | ||
664 | |||
665 | if (!bh) | ||
666 | return; | ||
667 | |||
668 | ld = bh_log_desc(bh); | ||
669 | ld->ld_length = cpu_to_be32(n + 1); | ||
670 | ld->ld_data1 = cpu_to_be32(n); | ||
671 | |||
672 | ptr = bh_log_ptr(bh); | ||
673 | |||
674 | get_bh(bh); | ||
675 | submit_bh(WRITE_SYNC, bh); | ||
676 | gfs2_log_lock(sdp); | ||
677 | while(!list_empty(list)) { | ||
678 | bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list); | ||
679 | list_move_tail(&bd->bd_le.le_list, done); | ||
680 | get_bh(bd->bd_bh); | ||
681 | while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) { | ||
682 | gfs2_log_incr_head(sdp); | ||
683 | ptr += 2; | ||
684 | } | ||
685 | gfs2_log_unlock(sdp); | ||
686 | lock_buffer(bd->bd_bh); | ||
687 | if (buffer_escaped(bd->bd_bh)) { | ||
688 | void *kaddr; | ||
689 | bh1 = gfs2_log_get_buf(sdp); | ||
690 | kaddr = kmap_atomic(bd->bd_bh->b_page); | ||
691 | memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh), | ||
692 | bh1->b_size); | ||
693 | kunmap_atomic(kaddr); | ||
694 | *(__be32 *)bh1->b_data = 0; | ||
695 | clear_buffer_escaped(bd->bd_bh); | ||
696 | unlock_buffer(bd->bd_bh); | ||
697 | brelse(bd->bd_bh); | ||
698 | } else { | ||
699 | bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh); | ||
700 | } | ||
701 | submit_bh(WRITE_SYNC, bh1); | ||
702 | gfs2_log_lock(sdp); | ||
703 | ptr += 2; | ||
704 | } | ||
705 | gfs2_log_unlock(sdp); | ||
706 | brelse(bh); | ||
707 | } | ||
708 | |||
709 | /** | 796 | /** |
710 | * databuf_lo_before_commit - Scan the data buffers, writing as we go | 797 | * databuf_lo_before_commit - Scan the data buffers, writing as we go |
711 | * | 798 | * |
@@ -713,37 +800,10 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh, | |||
713 | 800 | ||
714 | static void databuf_lo_before_commit(struct gfs2_sbd *sdp) | 801 | static void databuf_lo_before_commit(struct gfs2_sbd *sdp) |
715 | { | 802 | { |
716 | struct gfs2_bufdata *bd = NULL; | 803 | unsigned int limit = buf_limit(sdp) / 2; |
717 | struct buffer_head *bh = NULL; | ||
718 | unsigned int n = 0; | ||
719 | __be64 *ptr = NULL, *end = NULL; | ||
720 | LIST_HEAD(processed); | ||
721 | LIST_HEAD(in_progress); | ||
722 | 804 | ||
723 | gfs2_log_lock(sdp); | 805 | gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf, |
724 | while (!list_empty(&sdp->sd_log_le_databuf)) { | 806 | &sdp->sd_log_le_databuf, 1); |
725 | if (ptr == end) { | ||
726 | gfs2_log_unlock(sdp); | ||
727 | gfs2_write_blocks(sdp, bh, &in_progress, &processed, n); | ||
728 | n = 0; | ||
729 | bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA); | ||
730 | ptr = bh_log_ptr(bh); | ||
731 | end = bh_ptr_end(bh) - 1; | ||
732 | gfs2_log_lock(sdp); | ||
733 | continue; | ||
734 | } | ||
735 | bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list); | ||
736 | list_move_tail(&bd->bd_le.le_list, &in_progress); | ||
737 | gfs2_check_magic(bd->bd_bh); | ||
738 | *ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr); | ||
739 | *ptr++ = cpu_to_be64(buffer_escaped(bh) ? 1 : 0); | ||
740 | n++; | ||
741 | } | ||
742 | gfs2_log_unlock(sdp); | ||
743 | gfs2_write_blocks(sdp, bh, &in_progress, &processed, n); | ||
744 | gfs2_log_lock(sdp); | ||
745 | list_splice(&processed, &sdp->sd_log_le_databuf); | ||
746 | gfs2_log_unlock(sdp); | ||
747 | } | 807 | } |
748 | 808 | ||
749 | static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start, | 809 | static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start, |
@@ -822,8 +882,8 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) | |||
822 | struct gfs2_bufdata *bd; | 882 | struct gfs2_bufdata *bd; |
823 | 883 | ||
824 | while (!list_empty(head)) { | 884 | while (!list_empty(head)) { |
825 | bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list); | 885 | bd = list_entry(head->next, struct gfs2_bufdata, bd_list); |
826 | list_del_init(&bd->bd_le.le_list); | 886 | list_del_init(&bd->bd_list); |
827 | sdp->sd_log_num_databuf--; | 887 | sdp->sd_log_num_databuf--; |
828 | gfs2_unpin(sdp, bd->bd_bh, ai); | 888 | gfs2_unpin(sdp, bd->bd_bh, ai); |
829 | } | 889 | } |
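The revoke changes above move the log from buffer heads to whole log pages: each revoke record is one big-endian u64 block number, and a fresh page with a GFS2_METATYPE_LB header is started whenever the next record would overflow sb_bsize. A minimal userspace sketch of that packing arithmetic follows; the header sizes are passed in as assumptions rather than taken from the real gfs2_log_descriptor / gfs2_meta_header layouts.

#include <stdio.h>
#include <stdint.h>

/* Count how many log blocks a run of revoke records needs.
 * bsize, desc_hdr and cont_hdr are caller-supplied assumptions; in GFS2 the
 * real values come from the block size and the on-disk header structures. */
static unsigned int revoke_blocks(unsigned int nrevokes, unsigned int bsize,
                                  unsigned int desc_hdr, unsigned int cont_hdr)
{
    unsigned int blocks = 1;
    unsigned int offset = desc_hdr;            /* first block carries the descriptor */
    unsigned int i;

    for (i = 0; i < nrevokes; i++) {
        if (offset + sizeof(uint64_t) > bsize) {   /* same test as revoke_lo_before_commit() */
            blocks++;
            offset = cont_hdr;                 /* continuation block starts with a meta header */
        }
        offset += sizeof(uint64_t);            /* one u64 block number per revoke */
    }
    return blocks;
}

int main(void)
{
    /* e.g. 4096-byte blocks, 64-byte descriptor, 24-byte continuation header (assumed sizes) */
    printf("%u\n", revoke_blocks(1000, 4096, 64, 24));
    return 0;
}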
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 3c0b2737658a..954a330585f4 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h | |||
@@ -27,6 +27,8 @@ extern const struct gfs2_log_operations gfs2_rg_lops; | |||
27 | extern const struct gfs2_log_operations gfs2_databuf_lops; | 27 | extern const struct gfs2_log_operations gfs2_databuf_lops; |
28 | 28 | ||
29 | extern const struct gfs2_log_operations *gfs2_log_ops[]; | 29 | extern const struct gfs2_log_operations *gfs2_log_ops[]; |
30 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); | ||
31 | extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw); | ||
30 | 32 | ||
31 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) | 33 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) |
32 | { | 34 | { |
@@ -44,17 +46,17 @@ static inline unsigned int databuf_limit(struct gfs2_sbd *sdp) | |||
44 | return limit; | 46 | return limit; |
45 | } | 47 | } |
46 | 48 | ||
47 | static inline void lops_init_le(struct gfs2_log_element *le, | 49 | static inline void lops_init_le(struct gfs2_bufdata *bd, |
48 | const struct gfs2_log_operations *lops) | 50 | const struct gfs2_log_operations *lops) |
49 | { | 51 | { |
50 | INIT_LIST_HEAD(&le->le_list); | 52 | INIT_LIST_HEAD(&bd->bd_list); |
51 | le->le_ops = lops; | 53 | bd->bd_ops = lops; |
52 | } | 54 | } |
53 | 55 | ||
54 | static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | 56 | static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) |
55 | { | 57 | { |
56 | if (le->le_ops->lo_add) | 58 | if (bd->bd_ops->lo_add) |
57 | le->le_ops->lo_add(sdp, le); | 59 | bd->bd_ops->lo_add(sdp, bd); |
58 | } | 60 | } |
59 | 61 | ||
60 | static inline void lops_before_commit(struct gfs2_sbd *sdp) | 62 | static inline void lops_before_commit(struct gfs2_sbd *sdp) |
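lops_init_le() and lops_add() now hang the log-operations vector directly off struct gfs2_bufdata instead of going through an embedded gfs2_log_element. The shape of that dispatch is simply an ops pointer stored on the object; a self-contained sketch with made-up names, not GFS2 code:

#include <stdio.h>
#include <stddef.h>

struct item;

struct item_ops {
    void (*add)(struct item *it);   /* optional hook, may be NULL */
};

struct item {
    const struct item_ops *ops;
    int payload;
};

static void init_item(struct item *it, const struct item_ops *ops)
{
    it->ops = ops;                  /* mirrors lops_init_le(): attach the vector */
}

static void add_item(struct item *it)
{
    if (it->ops->add)               /* mirrors lops_add(): dispatch if the hook exists */
        it->ops->add(it);
}

static void print_add(struct item *it)
{
    printf("adding item %d\n", it->payload);
}

static const struct item_ops print_ops = { .add = print_add };

int main(void)
{
    struct item it = { .payload = 42 };

    init_item(&it, &print_ops);
    add_item(&it);
    return 0;
}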
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 754426b1e52c..6cdb0f2a1b09 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -70,16 +70,6 @@ static void gfs2_init_gl_aspace_once(void *foo) | |||
70 | address_space_init_once(mapping); | 70 | address_space_init_once(mapping); |
71 | } | 71 | } |
72 | 72 | ||
73 | static void *gfs2_bh_alloc(gfp_t mask, void *data) | ||
74 | { | ||
75 | return alloc_buffer_head(mask); | ||
76 | } | ||
77 | |||
78 | static void gfs2_bh_free(void *ptr, void *data) | ||
79 | { | ||
80 | return free_buffer_head(ptr); | ||
81 | } | ||
82 | |||
83 | /** | 73 | /** |
84 | * init_gfs2_fs - Register GFS2 as a filesystem | 74 | * init_gfs2_fs - Register GFS2 as a filesystem |
85 | * | 75 | * |
@@ -143,6 +133,12 @@ static int __init init_gfs2_fs(void) | |||
143 | if (!gfs2_quotad_cachep) | 133 | if (!gfs2_quotad_cachep) |
144 | goto fail; | 134 | goto fail; |
145 | 135 | ||
136 | gfs2_rsrv_cachep = kmem_cache_create("gfs2_mblk", | ||
137 | sizeof(struct gfs2_blkreserv), | ||
138 | 0, 0, NULL); | ||
139 | if (!gfs2_rsrv_cachep) | ||
140 | goto fail; | ||
141 | |||
146 | register_shrinker(&qd_shrinker); | 142 | register_shrinker(&qd_shrinker); |
147 | 143 | ||
148 | error = register_filesystem(&gfs2_fs_type); | 144 | error = register_filesystem(&gfs2_fs_type); |
@@ -164,8 +160,8 @@ static int __init init_gfs2_fs(void) | |||
164 | if (!gfs2_control_wq) | 160 | if (!gfs2_control_wq) |
165 | goto fail_recovery; | 161 | goto fail_recovery; |
166 | 162 | ||
167 | gfs2_bh_pool = mempool_create(1024, gfs2_bh_alloc, gfs2_bh_free, NULL); | 163 | gfs2_page_pool = mempool_create_page_pool(64, 0); |
168 | if (!gfs2_bh_pool) | 164 | if (!gfs2_page_pool) |
169 | goto fail_control; | 165 | goto fail_control; |
170 | 166 | ||
171 | gfs2_register_debugfs(); | 167 | gfs2_register_debugfs(); |
@@ -186,6 +182,9 @@ fail: | |||
186 | unregister_shrinker(&qd_shrinker); | 182 | unregister_shrinker(&qd_shrinker); |
187 | gfs2_glock_exit(); | 183 | gfs2_glock_exit(); |
188 | 184 | ||
185 | if (gfs2_rsrv_cachep) | ||
186 | kmem_cache_destroy(gfs2_rsrv_cachep); | ||
187 | |||
189 | if (gfs2_quotad_cachep) | 188 | if (gfs2_quotad_cachep) |
190 | kmem_cache_destroy(gfs2_quotad_cachep); | 189 | kmem_cache_destroy(gfs2_quotad_cachep); |
191 | 190 | ||
@@ -225,7 +224,8 @@ static void __exit exit_gfs2_fs(void) | |||
225 | 224 | ||
226 | rcu_barrier(); | 225 | rcu_barrier(); |
227 | 226 | ||
228 | mempool_destroy(gfs2_bh_pool); | 227 | mempool_destroy(gfs2_page_pool); |
228 | kmem_cache_destroy(gfs2_rsrv_cachep); | ||
229 | kmem_cache_destroy(gfs2_quotad_cachep); | 229 | kmem_cache_destroy(gfs2_quotad_cachep); |
230 | kmem_cache_destroy(gfs2_rgrpd_cachep); | 230 | kmem_cache_destroy(gfs2_rgrpd_cachep); |
231 | kmem_cache_destroy(gfs2_bufdata_cachep); | 231 | kmem_cache_destroy(gfs2_bufdata_cachep); |
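The custom buffer-head mempool is replaced here by a stock page pool, and the log code (see the revoke path earlier) allocates whole pages with GFP_NOIO. A standalone demo module of the same mempool lifecycle; this is an illustration with made-up names, not GFS2 code, and it builds only against kernel headers:

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static mempool_t *demo_page_pool;

static int __init demo_init(void)
{
    struct page *page;

    /* keep at least 64 order-0 pages in reserve, as init_gfs2_fs() now does */
    demo_page_pool = mempool_create_page_pool(64, 0);
    if (!demo_page_pool)
        return -ENOMEM;

    /* GFP_NOIO matches the log code: the allocation can be satisfied from
     * the reserve instead of recursing into I/O while the log is written */
    page = mempool_alloc(demo_page_pool, GFP_NOIO);
    if (page) {
        clear_page(page_address(page));
        mempool_free(page, demo_page_pool);
    }
    return 0;
}

static void __exit demo_exit(void)
{
    mempool_destroy(demo_page_pool);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");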
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 181586e673f9..6c1e5d1c404a 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
@@ -293,11 +293,10 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, | |||
293 | bd->bd_bh = bh; | 293 | bd->bd_bh = bh; |
294 | bd->bd_gl = gl; | 294 | bd->bd_gl = gl; |
295 | 295 | ||
296 | INIT_LIST_HEAD(&bd->bd_list_tr); | ||
297 | if (meta) | 296 | if (meta) |
298 | lops_init_le(&bd->bd_le, &gfs2_buf_lops); | 297 | lops_init_le(bd, &gfs2_buf_lops); |
299 | else | 298 | else |
300 | lops_init_le(&bd->bd_le, &gfs2_databuf_lops); | 299 | lops_init_le(bd, &gfs2_databuf_lops); |
301 | bh->b_private = bd; | 300 | bh->b_private = bd; |
302 | 301 | ||
303 | if (meta) | 302 | if (meta) |
@@ -313,7 +312,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int | |||
313 | if (test_clear_buffer_pinned(bh)) { | 312 | if (test_clear_buffer_pinned(bh)) { |
314 | trace_gfs2_pin(bd, 0); | 313 | trace_gfs2_pin(bd, 0); |
315 | atomic_dec(&sdp->sd_log_pinned); | 314 | atomic_dec(&sdp->sd_log_pinned); |
316 | list_del_init(&bd->bd_le.le_list); | 315 | list_del_init(&bd->bd_list); |
317 | if (meta) { | 316 | if (meta) { |
318 | gfs2_assert_warn(sdp, sdp->sd_log_num_buf); | 317 | gfs2_assert_warn(sdp, sdp->sd_log_num_buf); |
319 | sdp->sd_log_num_buf--; | 318 | sdp->sd_log_num_buf--; |
@@ -375,33 +374,24 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen) | |||
375 | * @ip: The GFS2 inode | 374 | * @ip: The GFS2 inode |
376 | * @height: The level of this buf in the metadata (indir addr) tree (if any) | 375 | * @height: The level of this buf in the metadata (indir addr) tree (if any) |
377 | * @num: The block number (device relative) of the buffer | 376 | * @num: The block number (device relative) of the buffer |
378 | * @new: Non-zero if we may create a new buffer | ||
379 | * @bhp: the buffer is returned here | 377 | * @bhp: the buffer is returned here |
380 | * | 378 | * |
381 | * Returns: errno | 379 | * Returns: errno |
382 | */ | 380 | */ |
383 | 381 | ||
384 | int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, | 382 | int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, |
385 | int new, struct buffer_head **bhp) | 383 | struct buffer_head **bhp) |
386 | { | 384 | { |
387 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 385 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
388 | struct gfs2_glock *gl = ip->i_gl; | 386 | struct gfs2_glock *gl = ip->i_gl; |
389 | struct buffer_head *bh; | 387 | struct buffer_head *bh; |
390 | int ret = 0; | 388 | int ret = 0; |
389 | u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI; | ||
391 | 390 | ||
392 | if (new) { | 391 | ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh); |
393 | BUG_ON(height == 0); | 392 | if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) { |
394 | bh = gfs2_meta_new(gl, num); | 393 | brelse(bh); |
395 | gfs2_trans_add_bh(ip->i_gl, bh, 1); | 394 | ret = -EIO; |
396 | gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); | ||
397 | gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header)); | ||
398 | } else { | ||
399 | u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI; | ||
400 | ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh); | ||
401 | if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) { | ||
402 | brelse(bh); | ||
403 | ret = -EIO; | ||
404 | } | ||
405 | } | 395 | } |
406 | *bhp = bh; | 396 | *bhp = bh; |
407 | return ret; | 397 | return ret; |
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index 22c526593131..c30973b07a7c 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h | |||
@@ -65,12 +65,12 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, | |||
65 | void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen); | 65 | void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen); |
66 | 66 | ||
67 | int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, | 67 | int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, |
68 | int new, struct buffer_head **bhp); | 68 | struct buffer_head **bhp); |
69 | 69 | ||
70 | static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip, | 70 | static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip, |
71 | struct buffer_head **bhp) | 71 | struct buffer_head **bhp) |
72 | { | 72 | { |
73 | return gfs2_meta_indirect_buffer(ip, 0, ip->i_no_addr, 0, bhp); | 73 | return gfs2_meta_indirect_buffer(ip, 0, ip->i_no_addr, bhp); |
74 | } | 74 | } |
75 | 75 | ||
76 | struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen); | 76 | struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen); |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 6f3a18f9e176..b8c250fc4922 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -99,7 +99,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) | |||
99 | atomic_set(&sdp->sd_log_pinned, 0); | 99 | atomic_set(&sdp->sd_log_pinned, 0); |
100 | INIT_LIST_HEAD(&sdp->sd_log_le_buf); | 100 | INIT_LIST_HEAD(&sdp->sd_log_le_buf); |
101 | INIT_LIST_HEAD(&sdp->sd_log_le_revoke); | 101 | INIT_LIST_HEAD(&sdp->sd_log_le_revoke); |
102 | INIT_LIST_HEAD(&sdp->sd_log_le_rg); | ||
103 | INIT_LIST_HEAD(&sdp->sd_log_le_databuf); | 102 | INIT_LIST_HEAD(&sdp->sd_log_le_databuf); |
104 | INIT_LIST_HEAD(&sdp->sd_log_le_ordered); | 103 | INIT_LIST_HEAD(&sdp->sd_log_le_ordered); |
105 | 104 | ||
@@ -994,6 +993,7 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) | |||
994 | ls->ls_jid = option; | 993 | ls->ls_jid = option; |
995 | break; | 994 | break; |
996 | case Opt_id: | 995 | case Opt_id: |
996 | case Opt_nodir: | ||
997 | /* Obsolete, but left for backward compat purposes */ | 997 | /* Obsolete, but left for backward compat purposes */ |
998 | break; | 998 | break; |
999 | case Opt_first: | 999 | case Opt_first: |
@@ -1002,12 +1002,6 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) | |||
1002 | goto hostdata_error; | 1002 | goto hostdata_error; |
1003 | ls->ls_first = option; | 1003 | ls->ls_first = option; |
1004 | break; | 1004 | break; |
1005 | case Opt_nodir: | ||
1006 | ret = match_int(&tmp[0], &option); | ||
1007 | if (ret || (option != 0 && option != 1)) | ||
1008 | goto hostdata_error; | ||
1009 | ls->ls_nodir = option; | ||
1010 | break; | ||
1011 | case Opt_err: | 1005 | case Opt_err: |
1012 | default: | 1006 | default: |
1013 | hostdata_error: | 1007 | hostdata_error: |
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 6019da3dcaed..b97178e7d397 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -652,7 +652,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, | |||
652 | } | 652 | } |
653 | 653 | ||
654 | memset(&q, 0, sizeof(struct gfs2_quota)); | 654 | memset(&q, 0, sizeof(struct gfs2_quota)); |
655 | err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q)); | 655 | err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q)); |
656 | if (err < 0) | 656 | if (err < 0) |
657 | return err; | 657 | return err; |
658 | 658 | ||
@@ -744,7 +744,7 @@ get_a_page: | |||
744 | i_size_write(inode, size); | 744 | i_size_write(inode, size); |
745 | inode->i_mtime = inode->i_atime = CURRENT_TIME; | 745 | inode->i_mtime = inode->i_atime = CURRENT_TIME; |
746 | mark_inode_dirty(inode); | 746 | mark_inode_dirty(inode); |
747 | return err; | 747 | return 0; |
748 | 748 | ||
749 | unlock_out: | 749 | unlock_out: |
750 | unlock_page(page); | 750 | unlock_page(page); |
@@ -852,7 +852,7 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) | |||
852 | 852 | ||
853 | memset(&q, 0, sizeof(struct gfs2_quota)); | 853 | memset(&q, 0, sizeof(struct gfs2_quota)); |
854 | pos = qd2offset(qd); | 854 | pos = qd2offset(qd); |
855 | error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q)); | 855 | error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q)); |
856 | if (error < 0) | 856 | if (error < 0) |
857 | return error; | 857 | return error; |
858 | 858 | ||
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 3df65c9ab73b..f74fb9bd1973 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -70,15 +70,15 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, | |||
70 | 70 | ||
71 | /** | 71 | /** |
72 | * gfs2_setbit - Set a bit in the bitmaps | 72 | * gfs2_setbit - Set a bit in the bitmaps |
73 | * @buffer: the buffer that holds the bitmaps | 73 | * @rgd: the resource group descriptor |
74 | * @buflen: the length (in bytes) of the buffer | 74 | * @buf2: the clone buffer that holds the bitmaps |
75 | * @bi: the bitmap structure | ||
75 | * @block: the block to set | 76 | * @block: the block to set |
76 | * @new_state: the new state of the block | 77 | * @new_state: the new state of the block |
77 | * | 78 | * |
78 | */ | 79 | */ |
79 | 80 | ||
80 | static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1, | 81 | static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2, |
81 | unsigned char *buf2, unsigned int offset, | ||
82 | struct gfs2_bitmap *bi, u32 block, | 82 | struct gfs2_bitmap *bi, u32 block, |
83 | unsigned char new_state) | 83 | unsigned char new_state) |
84 | { | 84 | { |
@@ -86,8 +86,8 @@ static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1, | |||
86 | unsigned int buflen = bi->bi_len; | 86 | unsigned int buflen = bi->bi_len; |
87 | const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; | 87 | const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; |
88 | 88 | ||
89 | byte1 = buf1 + offset + (block / GFS2_NBBY); | 89 | byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY); |
90 | end = buf1 + offset + buflen; | 90 | end = bi->bi_bh->b_data + bi->bi_offset + buflen; |
91 | 91 | ||
92 | BUG_ON(byte1 >= end); | 92 | BUG_ON(byte1 >= end); |
93 | 93 | ||
@@ -110,7 +110,7 @@ static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1, | |||
110 | *byte1 ^= (cur_state ^ new_state) << bit; | 110 | *byte1 ^= (cur_state ^ new_state) << bit; |
111 | 111 | ||
112 | if (buf2) { | 112 | if (buf2) { |
113 | byte2 = buf2 + offset + (block / GFS2_NBBY); | 113 | byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY); |
114 | cur_state = (*byte2 >> bit) & GFS2_BIT_MASK; | 114 | cur_state = (*byte2 >> bit) & GFS2_BIT_MASK; |
115 | *byte2 ^= (cur_state ^ new_state) << bit; | 115 | *byte2 ^= (cur_state ^ new_state) << bit; |
116 | } | 116 | } |
@@ -118,6 +118,7 @@ static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1, | |||
118 | 118 | ||
119 | /** | 119 | /** |
120 | * gfs2_testbit - test a bit in the bitmaps | 120 | * gfs2_testbit - test a bit in the bitmaps |
121 | * @rgd: the resource group descriptor | ||
121 | * @buffer: the buffer that holds the bitmaps | 122 | * @buffer: the buffer that holds the bitmaps |
122 | * @buflen: the length (in bytes) of the buffer | 123 | * @buflen: the length (in bytes) of the buffer |
123 | * @block: the block to read | 124 | * @block: the block to read |
@@ -179,7 +180,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state) | |||
179 | /** | 180 | /** |
180 | * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing | 181 | * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing |
181 | * a block in a given allocation state. | 182 | * a block in a given allocation state. |
182 | * @buffer: the buffer that holds the bitmaps | 183 | * @buf: the buffer that holds the bitmaps |
183 | * @len: the length (in bytes) of the buffer | 184 | * @len: the length (in bytes) of the buffer |
184 | * @goal: start search at this block's bit-pair (within @buffer) | 185 | * @goal: start search at this block's bit-pair (within @buffer) |
185 | * @state: GFS2_BLKST_XXX the state of the block we're looking for. | 186 | * @state: GFS2_BLKST_XXX the state of the block we're looking for. |
@@ -231,6 +232,7 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, | |||
231 | 232 | ||
232 | /** | 233 | /** |
233 | * gfs2_bitcount - count the number of bits in a certain state | 234 | * gfs2_bitcount - count the number of bits in a certain state |
235 | * @rgd: the resource group descriptor | ||
234 | * @buffer: the buffer that holds the bitmaps | 236 | * @buffer: the buffer that holds the bitmaps |
235 | * @buflen: the length (in bytes) of the buffer | 237 | * @buflen: the length (in bytes) of the buffer |
236 | * @state: the state of the block we're looking for | 238 | * @state: the state of the block we're looking for |
@@ -264,7 +266,6 @@ static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer, | |||
264 | 266 | ||
265 | /** | 267 | /** |
266 | * gfs2_rgrp_verify - Verify that a resource group is consistent | 268 | * gfs2_rgrp_verify - Verify that a resource group is consistent |
267 | * @sdp: the filesystem | ||
268 | * @rgd: the rgrp | 269 | * @rgd: the rgrp |
269 | * | 270 | * |
270 | */ | 271 | */ |
@@ -322,7 +323,8 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block) | |||
322 | /** | 323 | /** |
323 | * gfs2_blk2rgrpd - Find resource group for a given data/meta block number | 324 | * gfs2_blk2rgrpd - Find resource group for a given data/meta block number |
324 | * @sdp: The GFS2 superblock | 325 | * @sdp: The GFS2 superblock |
325 | * @n: The data block number | 326 | * @blk: The data block number |
327 | * @exact: True if this needs to be an exact match | ||
326 | * | 328 | * |
327 | * Returns: The resource group, or NULL if not found | 329 | * Returns: The resource group, or NULL if not found |
328 | */ | 330 | */ |
@@ -380,7 +382,7 @@ struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) | |||
380 | 382 | ||
381 | /** | 383 | /** |
382 | * gfs2_rgrpd_get_next - get the next RG | 384 | * gfs2_rgrpd_get_next - get the next RG |
383 | * @rgd: A RG | 385 | * @rgd: the resource group descriptor |
384 | * | 386 | * |
385 | * Returns: The next rgrp | 387 | * Returns: The next rgrp |
386 | */ | 388 | */ |
@@ -529,6 +531,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd) | |||
529 | 531 | ||
530 | /** | 532 | /** |
531 | * gfs2_ri_total - Total up the file system space, according to the rindex. | 533 | * gfs2_ri_total - Total up the file system space, according to the rindex. |
534 | * @sdp: the filesystem | ||
532 | * | 535 | * |
533 | */ | 536 | */ |
534 | u64 gfs2_ri_total(struct gfs2_sbd *sdp) | 537 | u64 gfs2_ri_total(struct gfs2_sbd *sdp) |
@@ -537,16 +540,14 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp) | |||
537 | struct inode *inode = sdp->sd_rindex; | 540 | struct inode *inode = sdp->sd_rindex; |
538 | struct gfs2_inode *ip = GFS2_I(inode); | 541 | struct gfs2_inode *ip = GFS2_I(inode); |
539 | char buf[sizeof(struct gfs2_rindex)]; | 542 | char buf[sizeof(struct gfs2_rindex)]; |
540 | struct file_ra_state ra_state; | ||
541 | int error, rgrps; | 543 | int error, rgrps; |
542 | 544 | ||
543 | file_ra_state_init(&ra_state, inode->i_mapping); | ||
544 | for (rgrps = 0;; rgrps++) { | 545 | for (rgrps = 0;; rgrps++) { |
545 | loff_t pos = rgrps * sizeof(struct gfs2_rindex); | 546 | loff_t pos = rgrps * sizeof(struct gfs2_rindex); |
546 | 547 | ||
547 | if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode)) | 548 | if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode)) |
548 | break; | 549 | break; |
549 | error = gfs2_internal_read(ip, &ra_state, buf, &pos, | 550 | error = gfs2_internal_read(ip, buf, &pos, |
550 | sizeof(struct gfs2_rindex)); | 551 | sizeof(struct gfs2_rindex)); |
551 | if (error != sizeof(struct gfs2_rindex)) | 552 | if (error != sizeof(struct gfs2_rindex)) |
552 | break; | 553 | break; |
@@ -582,13 +583,12 @@ static int rgd_insert(struct gfs2_rgrpd *rgd) | |||
582 | 583 | ||
583 | /** | 584 | /** |
584 | * read_rindex_entry - Pull in a new resource index entry from the disk | 585 | * read_rindex_entry - Pull in a new resource index entry from the disk |
585 | * @gl: The glock covering the rindex inode | 586 | * @ip: Pointer to the rindex inode |
586 | * | 587 | * |
587 | * Returns: 0 on success, > 0 on EOF, error code otherwise | 588 | * Returns: 0 on success, > 0 on EOF, error code otherwise |
588 | */ | 589 | */ |
589 | 590 | ||
590 | static int read_rindex_entry(struct gfs2_inode *ip, | 591 | static int read_rindex_entry(struct gfs2_inode *ip) |
591 | struct file_ra_state *ra_state) | ||
592 | { | 592 | { |
593 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 593 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
594 | loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex); | 594 | loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex); |
@@ -599,7 +599,7 @@ static int read_rindex_entry(struct gfs2_inode *ip, | |||
599 | if (pos >= i_size_read(&ip->i_inode)) | 599 | if (pos >= i_size_read(&ip->i_inode)) |
600 | return 1; | 600 | return 1; |
601 | 601 | ||
602 | error = gfs2_internal_read(ip, ra_state, (char *)&buf, &pos, | 602 | error = gfs2_internal_read(ip, (char *)&buf, &pos, |
603 | sizeof(struct gfs2_rindex)); | 603 | sizeof(struct gfs2_rindex)); |
604 | 604 | ||
605 | if (error != sizeof(struct gfs2_rindex)) | 605 | if (error != sizeof(struct gfs2_rindex)) |
@@ -655,13 +655,10 @@ fail: | |||
655 | static int gfs2_ri_update(struct gfs2_inode *ip) | 655 | static int gfs2_ri_update(struct gfs2_inode *ip) |
656 | { | 656 | { |
657 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 657 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
658 | struct inode *inode = &ip->i_inode; | ||
659 | struct file_ra_state ra_state; | ||
660 | int error; | 658 | int error; |
661 | 659 | ||
662 | file_ra_state_init(&ra_state, inode->i_mapping); | ||
663 | do { | 660 | do { |
664 | error = read_rindex_entry(ip, &ra_state); | 661 | error = read_rindex_entry(ip); |
665 | } while (error == 0); | 662 | } while (error == 0); |
666 | 663 | ||
667 | if (error < 0) | 664 | if (error < 0) |
@@ -741,7 +738,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) | |||
741 | 738 | ||
742 | /** | 739 | /** |
743 | * gfs2_rgrp_go_lock - Read in a RG's header and bitmaps | 740 | * gfs2_rgrp_go_lock - Read in a RG's header and bitmaps |
744 | * @rgd: the struct gfs2_rgrpd describing the RG to read in | 741 | * @gh: The glock holder for the resource group |
745 | * | 742 | * |
746 | * Read in all of a Resource Group's header and bitmap blocks. | 743 | * Read in all of a Resource Group's header and bitmap blocks. |
747 | * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps. | 744 | * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps. |
@@ -801,7 +798,7 @@ fail: | |||
801 | 798 | ||
802 | /** | 799 | /** |
803 | * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get() | 800 | * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get() |
804 | * @rgd: the struct gfs2_rgrpd describing the RG to read in | 801 | * @gh: The glock holder for the resource group |
805 | * | 802 | * |
806 | */ | 803 | */ |
807 | 804 | ||
@@ -1002,11 +999,13 @@ struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip) | |||
1002 | * Returns: the struct gfs2_qadata | 999 | * Returns: the struct gfs2_qadata |
1003 | */ | 1000 | */ |
1004 | 1001 | ||
1005 | static struct gfs2_blkreserv *gfs2_blkrsv_get(struct gfs2_inode *ip) | 1002 | static int gfs2_blkrsv_get(struct gfs2_inode *ip) |
1006 | { | 1003 | { |
1007 | BUG_ON(ip->i_res != NULL); | 1004 | BUG_ON(ip->i_res != NULL); |
1008 | ip->i_res = kzalloc(sizeof(struct gfs2_blkreserv), GFP_NOFS); | 1005 | ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS); |
1009 | return ip->i_res; | 1006 | if (!ip->i_res) |
1007 | return -ENOMEM; | ||
1008 | return 0; | ||
1010 | } | 1009 | } |
1011 | 1010 | ||
1012 | /** | 1011 | /** |
@@ -1038,6 +1037,8 @@ static inline u32 gfs2_bi2rgd_blk(struct gfs2_bitmap *bi, u32 blk) | |||
1038 | /** | 1037 | /** |
1039 | * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes | 1038 | * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes |
1040 | * @rgd: The rgrp | 1039 | * @rgd: The rgrp |
1040 | * @last_unlinked: block address of the last dinode we unlinked | ||
1041 | * @skip: block address we should explicitly not unlink | ||
1041 | * | 1042 | * |
1042 | * Returns: 0 if no error | 1043 | * Returns: 0 if no error |
1043 | * The inode, if one has been found, in inode. | 1044 | * The inode, if one has been found, in inode. |
@@ -1102,7 +1103,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip | |||
1102 | /** | 1103 | /** |
1103 | * get_local_rgrp - Choose and lock a rgrp for allocation | 1104 | * get_local_rgrp - Choose and lock a rgrp for allocation |
1104 | * @ip: the inode to reserve space for | 1105 | * @ip: the inode to reserve space for |
1105 | * @rgp: the chosen and locked rgrp | 1106 | * @last_unlinked: the last unlinked block |
1106 | * | 1107 | * |
1107 | * Try to acquire rgrp in way which avoids contending with others. | 1108 | * Try to acquire rgrp in way which avoids contending with others. |
1108 | * | 1109 | * |
@@ -1164,13 +1165,14 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) | |||
1164 | static void gfs2_blkrsv_put(struct gfs2_inode *ip) | 1165 | static void gfs2_blkrsv_put(struct gfs2_inode *ip) |
1165 | { | 1166 | { |
1166 | BUG_ON(ip->i_res == NULL); | 1167 | BUG_ON(ip->i_res == NULL); |
1167 | kfree(ip->i_res); | 1168 | kmem_cache_free(gfs2_rsrv_cachep, ip->i_res); |
1168 | ip->i_res = NULL; | 1169 | ip->i_res = NULL; |
1169 | } | 1170 | } |
1170 | 1171 | ||
1171 | /** | 1172 | /** |
1172 | * gfs2_inplace_reserve - Reserve space in the filesystem | 1173 | * gfs2_inplace_reserve - Reserve space in the filesystem |
1173 | * @ip: the inode to reserve space for | 1174 | * @ip: the inode to reserve space for |
1175 | * @requested: the number of blocks to be reserved | ||
1174 | * | 1176 | * |
1175 | * Returns: errno | 1177 | * Returns: errno |
1176 | */ | 1178 | */ |
@@ -1179,14 +1181,15 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested) | |||
1179 | { | 1181 | { |
1180 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1182 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1181 | struct gfs2_blkreserv *rs; | 1183 | struct gfs2_blkreserv *rs; |
1182 | int error = 0; | 1184 | int error; |
1183 | u64 last_unlinked = NO_BLOCK; | 1185 | u64 last_unlinked = NO_BLOCK; |
1184 | int tries = 0; | 1186 | int tries = 0; |
1185 | 1187 | ||
1186 | rs = gfs2_blkrsv_get(ip); | 1188 | error = gfs2_blkrsv_get(ip); |
1187 | if (!rs) | 1189 | if (error) |
1188 | return -ENOMEM; | 1190 | return error; |
1189 | 1191 | ||
1192 | rs = ip->i_res; | ||
1190 | rs->rs_requested = requested; | 1193 | rs->rs_requested = requested; |
1191 | if (gfs2_assert_warn(sdp, requested)) { | 1194 | if (gfs2_assert_warn(sdp, requested)) { |
1192 | error = -EINVAL; | 1195 | error = -EINVAL; |
@@ -1268,7 +1271,6 @@ static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block) | |||
1268 | * @rgd: the resource group descriptor | 1271 | * @rgd: the resource group descriptor |
1269 | * @goal: the goal block within the RG (start here to search for avail block) | 1272 | * @goal: the goal block within the RG (start here to search for avail block) |
1270 | * @state: GFS2_BLKST_XXX the before-allocation state to find | 1273 | * @state: GFS2_BLKST_XXX the before-allocation state to find |
1271 | * @dinode: TRUE if the first block we allocate is for a dinode | ||
1272 | * @rbi: address of the pointer to the bitmap containing the block found | 1274 | * @rbi: address of the pointer to the bitmap containing the block found |
1273 | * | 1275 | * |
1274 | * Walk rgrp's bitmap to find bits that represent a block in @state. | 1276 | * Walk rgrp's bitmap to find bits that represent a block in @state. |
@@ -1282,13 +1284,12 @@ static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block) | |||
1282 | * Returns: the block number found relative to the bitmap rbi | 1284 | * Returns: the block number found relative to the bitmap rbi |
1283 | */ | 1285 | */ |
1284 | 1286 | ||
1285 | static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, | 1287 | static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, unsigned char state, |
1286 | unsigned char state, | ||
1287 | struct gfs2_bitmap **rbi) | 1288 | struct gfs2_bitmap **rbi) |
1288 | { | 1289 | { |
1289 | struct gfs2_bitmap *bi = NULL; | 1290 | struct gfs2_bitmap *bi = NULL; |
1290 | const u32 length = rgd->rd_length; | 1291 | const u32 length = rgd->rd_length; |
1291 | u32 blk = BFITNOENT; | 1292 | u32 biblk = BFITNOENT; |
1292 | unsigned int buf, x; | 1293 | unsigned int buf, x; |
1293 | const u8 *buffer = NULL; | 1294 | const u8 *buffer = NULL; |
1294 | 1295 | ||
@@ -1325,8 +1326,8 @@ do_search: | |||
1325 | if (state != GFS2_BLKST_UNLINKED && bi->bi_clone) | 1326 | if (state != GFS2_BLKST_UNLINKED && bi->bi_clone) |
1326 | buffer = bi->bi_clone + bi->bi_offset; | 1327 | buffer = bi->bi_clone + bi->bi_offset; |
1327 | 1328 | ||
1328 | blk = gfs2_bitfit(buffer, bi->bi_len, goal, state); | 1329 | biblk = gfs2_bitfit(buffer, bi->bi_len, goal, state); |
1329 | if (blk != BFITNOENT) | 1330 | if (biblk != BFITNOENT) |
1330 | break; | 1331 | break; |
1331 | 1332 | ||
1332 | if ((goal == 0) && (state == GFS2_BLKST_FREE)) | 1333 | if ((goal == 0) && (state == GFS2_BLKST_FREE)) |
@@ -1339,10 +1340,10 @@ skip: | |||
1339 | goal = 0; | 1340 | goal = 0; |
1340 | } | 1341 | } |
1341 | 1342 | ||
1342 | if (blk != BFITNOENT) | 1343 | if (biblk != BFITNOENT) |
1343 | *rbi = bi; | 1344 | *rbi = bi; |
1344 | 1345 | ||
1345 | return blk; | 1346 | return biblk; |
1346 | } | 1347 | } |
1347 | 1348 | ||
1348 | /** | 1349 | /** |
@@ -1367,8 +1368,8 @@ static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi, | |||
1367 | *n = 0; | 1368 | *n = 0; |
1368 | buffer = bi->bi_bh->b_data + bi->bi_offset; | 1369 | buffer = bi->bi_bh->b_data + bi->bi_offset; |
1369 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); | 1370 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); |
1370 | gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset, | 1371 | gfs2_setbit(rgd, bi->bi_clone, bi, blk, |
1371 | bi, blk, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); | 1372 | dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); |
1372 | (*n)++; | 1373 | (*n)++; |
1373 | goal = blk; | 1374 | goal = blk; |
1374 | while (*n < elen) { | 1375 | while (*n < elen) { |
@@ -1378,8 +1379,7 @@ static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi, | |||
1378 | if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) != | 1379 | if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) != |
1379 | GFS2_BLKST_FREE) | 1380 | GFS2_BLKST_FREE) |
1380 | break; | 1381 | break; |
1381 | gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset, | 1382 | gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED); |
1382 | bi, goal, GFS2_BLKST_USED); | ||
1383 | (*n)++; | 1383 | (*n)++; |
1384 | } | 1384 | } |
1385 | blk = gfs2_bi2rgd_blk(bi, blk); | 1385 | blk = gfs2_bi2rgd_blk(bi, blk); |
@@ -1436,8 +1436,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, | |||
1436 | bi->bi_len); | 1436 | bi->bi_len); |
1437 | } | 1437 | } |
1438 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); | 1438 | gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); |
1439 | gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset, | 1439 | gfs2_setbit(rgd, NULL, bi, buf_blk, new_state); |
1440 | bi, buf_blk, new_state); | ||
1441 | } | 1440 | } |
1442 | 1441 | ||
1443 | return rgd; | 1442 | return rgd; |
@@ -1557,7 +1556,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, | |||
1557 | ip->i_inode.i_gid); | 1556 | ip->i_inode.i_gid); |
1558 | 1557 | ||
1559 | rgd->rd_free_clone -= *nblocks; | 1558 | rgd->rd_free_clone -= *nblocks; |
1560 | trace_gfs2_block_alloc(ip, block, *nblocks, | 1559 | trace_gfs2_block_alloc(ip, rgd, block, *nblocks, |
1561 | dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); | 1560 | dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); |
1562 | *bn = block; | 1561 | *bn = block; |
1563 | return 0; | 1562 | return 0; |
@@ -1584,7 +1583,7 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) | |||
1584 | rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); | 1583 | rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); |
1585 | if (!rgd) | 1584 | if (!rgd) |
1586 | return; | 1585 | return; |
1587 | trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE); | 1586 | trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE); |
1588 | rgd->rd_free += blen; | 1587 | rgd->rd_free += blen; |
1589 | rgd->rd_flags &= ~GFS2_RGF_TRIMMED; | 1588 | rgd->rd_flags &= ~GFS2_RGF_TRIMMED; |
1590 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1589 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
@@ -1622,7 +1621,7 @@ void gfs2_unlink_di(struct inode *inode) | |||
1622 | rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); | 1621 | rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); |
1623 | if (!rgd) | 1622 | if (!rgd) |
1624 | return; | 1623 | return; |
1625 | trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED); | 1624 | trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED); |
1626 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1625 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1627 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); | 1626 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); |
1628 | } | 1627 | } |
@@ -1652,7 +1651,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) | |||
1652 | void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) | 1651 | void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) |
1653 | { | 1652 | { |
1654 | gfs2_free_uninit_di(rgd, ip->i_no_addr); | 1653 | gfs2_free_uninit_di(rgd, ip->i_no_addr); |
1655 | trace_gfs2_block_alloc(ip, ip->i_no_addr, 1, GFS2_BLKST_FREE); | 1654 | trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); |
1656 | gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); | 1655 | gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); |
1657 | gfs2_meta_wipe(ip, ip->i_no_addr, 1); | 1656 | gfs2_meta_wipe(ip, ip->i_no_addr, 1); |
1658 | } | 1657 | } |
@@ -1752,7 +1751,6 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, | |||
1752 | * and initialize an array of glock holders for them | 1751 | * and initialize an array of glock holders for them |
1753 | * @rlist: the list of resource groups | 1752 | * @rlist: the list of resource groups |
1754 | * @state: the lock state to acquire the RG lock in | 1753 | * @state: the lock state to acquire the RG lock in |
1755 | * @flags: the modifier flags for the holder structures | ||
1756 | * | 1754 | * |
1757 | * FIXME: Don't use NOFAIL | 1755 | * FIXME: Don't use NOFAIL |
1758 | * | 1756 | * |
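gfs2_setbit() now derives the live bitmap address from bi->bi_bh->b_data + bi->bi_offset and only takes the clone buffer as an extra argument. Underneath, each block state is two bits, four states per byte, and updates XOR in only the bits that change. A runnable userspace sketch of that encoding; the constants mirror the GFS2 ones but are restated here as assumptions rather than copied from the headers:

#include <stdio.h>
#include <stdint.h>

#define NBBY      4      /* block states per byte (assumed, as in GFS2_NBBY) */
#define BIT_SIZE  2      /* bits per block state */
#define BIT_MASK  0x3

static void setbit(uint8_t *bitmap, uint32_t block, uint8_t new_state)
{
    unsigned int bit = (block % NBBY) * BIT_SIZE;
    uint8_t *byte = bitmap + block / NBBY;
    uint8_t cur_state = (*byte >> bit) & BIT_MASK;

    /* same XOR update as gfs2_setbit(): flip only the differing bits */
    *byte ^= (cur_state ^ new_state) << bit;
}

static uint8_t testbit(const uint8_t *bitmap, uint32_t block)
{
    unsigned int bit = (block % NBBY) * BIT_SIZE;

    return (bitmap[block / NBBY] >> bit) & BIT_MASK;
}

int main(void)
{
    uint8_t bitmap[8] = { 0 };   /* covers 32 blocks */

    setbit(bitmap, 5, 1);        /* e.g. mark block 5 "used" */
    setbit(bitmap, 6, 3);        /* e.g. mark block 6 "dinode" */
    printf("block 5 = %u, block 6 = %u, block 7 = %u\n",
           testbit(bitmap, 5), testbit(bitmap, 6), testbit(bitmap, 7));
    return 0;
}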
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index d33172c291ba..9c2592b1d5ff 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c | |||
@@ -368,10 +368,7 @@ int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid) | |||
368 | struct gfs2_jdesc *jd; | 368 | struct gfs2_jdesc *jd; |
369 | int rv; | 369 | int rv; |
370 | 370 | ||
371 | rv = -ESHUTDOWN; | ||
372 | spin_lock(&sdp->sd_jindex_spin); | 371 | spin_lock(&sdp->sd_jindex_spin); |
373 | if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) | ||
374 | goto out; | ||
375 | rv = -EBUSY; | 372 | rv = -EBUSY; |
376 | if (sdp->sd_jdesc->jd_jid == jid) | 373 | if (sdp->sd_jdesc->jd_jid == jid) |
377 | goto out; | 374 | goto out; |
@@ -396,8 +393,13 @@ static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | |||
396 | if (rv != 1) | 393 | if (rv != 1) |
397 | return -EINVAL; | 394 | return -EINVAL; |
398 | 395 | ||
399 | rv = gfs2_recover_set(sdp, jid); | 396 | if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) { |
397 | rv = -ESHUTDOWN; | ||
398 | goto out; | ||
399 | } | ||
400 | 400 | ||
401 | rv = gfs2_recover_set(sdp, jid); | ||
402 | out: | ||
401 | return rv ? rv : len; | 403 | return rv ? rv : len; |
402 | } | 404 | } |
403 | 405 | ||
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h index dfa89cd75534..1b8b81588199 100644 --- a/fs/gfs2/trace_gfs2.h +++ b/fs/gfs2/trace_gfs2.h | |||
@@ -457,10 +457,10 @@ TRACE_EVENT(gfs2_bmap, | |||
457 | /* Keep track of blocks as they are allocated/freed */ | 457 | /* Keep track of blocks as they are allocated/freed */ |
458 | TRACE_EVENT(gfs2_block_alloc, | 458 | TRACE_EVENT(gfs2_block_alloc, |
459 | 459 | ||
460 | TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len, | 460 | TP_PROTO(const struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, |
461 | u8 block_state), | 461 | u64 block, unsigned len, u8 block_state), |
462 | 462 | ||
463 | TP_ARGS(ip, block, len, block_state), | 463 | TP_ARGS(ip, rgd, block, len, block_state), |
464 | 464 | ||
465 | TP_STRUCT__entry( | 465 | TP_STRUCT__entry( |
466 | __field( dev_t, dev ) | 466 | __field( dev_t, dev ) |
@@ -468,6 +468,8 @@ TRACE_EVENT(gfs2_block_alloc, | |||
468 | __field( u64, inum ) | 468 | __field( u64, inum ) |
469 | __field( u32, len ) | 469 | __field( u32, len ) |
470 | __field( u8, block_state ) | 470 | __field( u8, block_state ) |
471 | __field( u64, rd_addr ) | ||
472 | __field( u32, rd_free_clone ) | ||
471 | ), | 473 | ), |
472 | 474 | ||
473 | TP_fast_assign( | 475 | TP_fast_assign( |
@@ -476,14 +478,18 @@ TRACE_EVENT(gfs2_block_alloc, | |||
476 | __entry->inum = ip->i_no_addr; | 478 | __entry->inum = ip->i_no_addr; |
477 | __entry->len = len; | 479 | __entry->len = len; |
478 | __entry->block_state = block_state; | 480 | __entry->block_state = block_state; |
481 | __entry->rd_addr = rgd->rd_addr; | ||
482 | __entry->rd_free_clone = rgd->rd_free_clone; | ||
479 | ), | 483 | ), |
480 | 484 | ||
481 | TP_printk("%u,%u bmap %llu alloc %llu/%lu %s", | 485 | TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u", |
482 | MAJOR(__entry->dev), MINOR(__entry->dev), | 486 | MAJOR(__entry->dev), MINOR(__entry->dev), |
483 | (unsigned long long)__entry->inum, | 487 | (unsigned long long)__entry->inum, |
484 | (unsigned long long)__entry->start, | 488 | (unsigned long long)__entry->start, |
485 | (unsigned long)__entry->len, | 489 | (unsigned long)__entry->len, |
486 | block_state_name(__entry->block_state)) | 490 | block_state_name(__entry->block_state), |
491 | (unsigned long long)__entry->rd_addr, | ||
492 | __entry->rd_free_clone) | ||
487 | ); | 493 | ); |
488 | 494 | ||
489 | #endif /* _TRACE_GFS2_H */ | 495 | #endif /* _TRACE_GFS2_H */ |
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 86ac75d99d31..ad3e2fb763d7 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c | |||
@@ -50,8 +50,6 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, | |||
50 | if (revokes) | 50 | if (revokes) |
51 | tr->tr_reserved += gfs2_struct2blk(sdp, revokes, | 51 | tr->tr_reserved += gfs2_struct2blk(sdp, revokes, |
52 | sizeof(u64)); | 52 | sizeof(u64)); |
53 | INIT_LIST_HEAD(&tr->tr_list_buf); | ||
54 | |||
55 | gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh); | 53 | gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh); |
56 | 54 | ||
57 | error = gfs2_glock_nq(&tr->tr_t_gh); | 55 | error = gfs2_glock_nq(&tr->tr_t_gh); |
@@ -93,10 +91,21 @@ static void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks) | |||
93 | up_read(&sdp->sd_log_flush_lock); | 91 | up_read(&sdp->sd_log_flush_lock); |
94 | } | 92 | } |
95 | 93 | ||
94 | static void gfs2_print_trans(const struct gfs2_trans *tr) | ||
95 | { | ||
96 | print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip); | ||
97 | printk(KERN_WARNING "GFS2: blocks=%u revokes=%u reserved=%u touched=%d\n", | ||
98 | tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, tr->tr_touched); | ||
99 | printk(KERN_WARNING "GFS2: Buf %u/%u Databuf %u/%u Revoke %u/%u\n", | ||
100 | tr->tr_num_buf_new, tr->tr_num_buf_rm, | ||
101 | tr->tr_num_databuf_new, tr->tr_num_databuf_rm, | ||
102 | tr->tr_num_revoke, tr->tr_num_revoke_rm); | ||
103 | } | ||
104 | |||
96 | void gfs2_trans_end(struct gfs2_sbd *sdp) | 105 | void gfs2_trans_end(struct gfs2_sbd *sdp) |
97 | { | 106 | { |
98 | struct gfs2_trans *tr = current->journal_info; | 107 | struct gfs2_trans *tr = current->journal_info; |
99 | 108 | s64 nbuf; | |
100 | BUG_ON(!tr); | 109 | BUG_ON(!tr); |
101 | current->journal_info = NULL; | 110 | current->journal_info = NULL; |
102 | 111 | ||
@@ -110,16 +119,13 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) | |||
110 | return; | 119 | return; |
111 | } | 120 | } |
112 | 121 | ||
113 | if (gfs2_assert_withdraw(sdp, tr->tr_num_buf <= tr->tr_blocks)) { | 122 | nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new; |
114 | fs_err(sdp, "tr_num_buf = %u, tr_blocks = %u ", | 123 | nbuf -= tr->tr_num_buf_rm; |
115 | tr->tr_num_buf, tr->tr_blocks); | 124 | nbuf -= tr->tr_num_databuf_rm; |
116 | print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip); | 125 | |
117 | } | 126 | if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) && |
118 | if (gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes)) { | 127 | (tr->tr_num_revoke <= tr->tr_revokes))) |
119 | fs_err(sdp, "tr_num_revoke = %u, tr_revokes = %u ", | 128 | gfs2_print_trans(tr); |
120 | tr->tr_num_revoke, tr->tr_revokes); | ||
121 | print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip); | ||
122 | } | ||
123 | 129 | ||
124 | gfs2_log_commit(sdp, tr); | 130 | gfs2_log_commit(sdp, tr); |
125 | if (tr->tr_t_gh.gh_gl) { | 131 | if (tr->tr_t_gh.gh_gl) { |
@@ -152,16 +158,16 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta) | |||
152 | gfs2_attach_bufdata(gl, bh, meta); | 158 | gfs2_attach_bufdata(gl, bh, meta); |
153 | bd = bh->b_private; | 159 | bd = bh->b_private; |
154 | } | 160 | } |
155 | lops_add(sdp, &bd->bd_le); | 161 | lops_add(sdp, bd); |
156 | } | 162 | } |
157 | 163 | ||
158 | void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) | 164 | void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) |
159 | { | 165 | { |
160 | BUG_ON(!list_empty(&bd->bd_le.le_list)); | 166 | BUG_ON(!list_empty(&bd->bd_list)); |
161 | BUG_ON(!list_empty(&bd->bd_ail_st_list)); | 167 | BUG_ON(!list_empty(&bd->bd_ail_st_list)); |
162 | BUG_ON(!list_empty(&bd->bd_ail_gl_list)); | 168 | BUG_ON(!list_empty(&bd->bd_ail_gl_list)); |
163 | lops_init_le(&bd->bd_le, &gfs2_revoke_lops); | 169 | lops_init_le(bd, &gfs2_revoke_lops); |
164 | lops_add(sdp, &bd->bd_le); | 170 | lops_add(sdp, bd); |
165 | } | 171 | } |
166 | 172 | ||
167 | void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) | 173 | void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) |
@@ -171,9 +177,9 @@ void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) | |||
171 | unsigned int n = len; | 177 | unsigned int n = len; |
172 | 178 | ||
173 | gfs2_log_lock(sdp); | 179 | gfs2_log_lock(sdp); |
174 | list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_le.le_list) { | 180 | list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_list) { |
175 | if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) { | 181 | if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) { |
176 | list_del_init(&bd->bd_le.le_list); | 182 | list_del_init(&bd->bd_list); |
177 | gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke); | 183 | gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke); |
178 | sdp->sd_log_num_revoke--; | 184 | sdp->sd_log_num_revoke--; |
179 | kmem_cache_free(gfs2_bufdata_cachep, bd); | 185 | kmem_cache_free(gfs2_bufdata_cachep, bd); |
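gfs2_trans_end() above folds the buffer accounting into one check: new metadata buffers plus new data buffers, minus any removed again, must not exceed the blocks reserved, and the revokes issued must not exceed the revoke reservation. The same arithmetic as a tiny standalone program, with plain ints standing in for the gfs2_trans fields:

#include <stdbool.h>
#include <stdio.h>

static bool trans_fits(int buf_new, int buf_rm, int databuf_new, int databuf_rm,
                       int revokes_issued, int blocks_reserved, int revokes_reserved)
{
    int nbuf = buf_new + databuf_new - buf_rm - databuf_rm;

    return nbuf <= blocks_reserved && revokes_issued <= revokes_reserved;
}

int main(void)
{
    /* a transaction that reserved 8 blocks and 2 revokes, then dirtied
     * 5 metadata buffers and 2 data buffers and issued 1 revoke */
    printf("%s\n", trans_fits(5, 0, 2, 0, 1, 8, 2) ? "ok" : "overflow");
    return 0;
}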
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index 9e7765e8e7b0..f00d7c5744f6 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c | |||
@@ -25,7 +25,8 @@ struct kmem_cache *gfs2_inode_cachep __read_mostly; | |||
25 | struct kmem_cache *gfs2_bufdata_cachep __read_mostly; | 25 | struct kmem_cache *gfs2_bufdata_cachep __read_mostly; |
26 | struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; | 26 | struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; |
27 | struct kmem_cache *gfs2_quotad_cachep __read_mostly; | 27 | struct kmem_cache *gfs2_quotad_cachep __read_mostly; |
28 | mempool_t *gfs2_bh_pool __read_mostly; | 28 | struct kmem_cache *gfs2_rsrv_cachep __read_mostly; |
29 | mempool_t *gfs2_page_pool __read_mostly; | ||
29 | 30 | ||
30 | void gfs2_assert_i(struct gfs2_sbd *sdp) | 31 | void gfs2_assert_i(struct gfs2_sbd *sdp) |
31 | { | 32 | { |
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h index a4ce76c67dbb..3586b0dd6aa7 100644 --- a/fs/gfs2/util.h +++ b/fs/gfs2/util.h | |||
@@ -152,7 +152,8 @@ extern struct kmem_cache *gfs2_inode_cachep; | |||
152 | extern struct kmem_cache *gfs2_bufdata_cachep; | 152 | extern struct kmem_cache *gfs2_bufdata_cachep; |
153 | extern struct kmem_cache *gfs2_rgrpd_cachep; | 153 | extern struct kmem_cache *gfs2_rgrpd_cachep; |
154 | extern struct kmem_cache *gfs2_quotad_cachep; | 154 | extern struct kmem_cache *gfs2_quotad_cachep; |
155 | extern mempool_t *gfs2_bh_pool; | 155 | extern struct kmem_cache *gfs2_rsrv_cachep; |
156 | extern mempool_t *gfs2_page_pool; | ||
156 | 157 | ||
157 | static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, | 158 | static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, |
158 | unsigned int *p) | 159 | unsigned int *p) |
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index ad271c70aa25..5a2dec2b064c 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c | |||
@@ -234,8 +234,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
234 | return 0; | 234 | return 0; |
235 | 235 | ||
236 | jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n"); | 236 | jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n"); |
237 | spin_lock(&c->erase_completion_lock); | ||
238 | mutex_lock(&c->alloc_sem); | 237 | mutex_lock(&c->alloc_sem); |
238 | spin_lock(&c->erase_completion_lock); | ||
239 | } | 239 | } |
240 | 240 | ||
241 | /* First, work out which block we're garbage-collecting */ | 241 | /* First, work out which block we're garbage-collecting */ |
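The jffs2 hunk swaps the acquisition order so the mutex is taken before the spinlock: mutex_lock() may sleep, and sleeping is not allowed while a spinlock is held. A demo module showing the corrected ordering, with made-up lock names standing in for alloc_sem and erase_completion_lock; illustration only, not jffs2 code:

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_alloc_sem);
static DEFINE_SPINLOCK(demo_completion_lock);

static void demo_locked_work(void)
{
    mutex_lock(&demo_alloc_sem);          /* may sleep: must be taken first */
    spin_lock(&demo_completion_lock);     /* no sleeping from here on */

    /* ... work that must not sleep ... */

    spin_unlock(&demo_completion_lock);
    mutex_unlock(&demo_alloc_sem);
}

static int __init demo_init(void)
{
    demo_locked_work();
    return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");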
diff --git a/fs/libfs.c b/fs/libfs.c index 18d08f5db53a..f86ec27a4230 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
@@ -68,7 +68,7 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct na | |||
68 | 68 | ||
69 | int dcache_dir_open(struct inode *inode, struct file *file) | 69 | int dcache_dir_open(struct inode *inode, struct file *file) |
70 | { | 70 | { |
71 | static struct qstr cursor_name = {.len = 1, .name = "."}; | 71 | static struct qstr cursor_name = QSTR_INIT(".", 1); |
72 | 72 | ||
73 | file->private_data = d_alloc(file->f_path.dentry, &cursor_name); | 73 | file->private_data = d_alloc(file->f_path.dentry, &cursor_name); |
74 | 74 | ||
@@ -225,7 +225,7 @@ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, | |||
225 | struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); | 225 | struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); |
226 | struct dentry *dentry; | 226 | struct dentry *dentry; |
227 | struct inode *root; | 227 | struct inode *root; |
228 | struct qstr d_name = {.name = name, .len = strlen(name)}; | 228 | struct qstr d_name = QSTR_INIT(name, strlen(name)); |
229 | 229 | ||
230 | if (IS_ERR(s)) | 230 | if (IS_ERR(s)) |
231 | return ERR_CAST(s); | 231 | return ERR_CAST(s); |
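Several hunks in this series (libfs here, nfs and nilfs2 below) replace open-coded struct qstr initializers with QSTR_INIT(). The helper is just a designated-initializer wrapper from include/linux/dcache.h; a rough sketch of the equivalence (the exact field order in the in-tree macro may differ):

#include <linux/dcache.h>

/* these two declarations describe the same qstr */
static struct qstr dotdot_a = QSTR_INIT("..", 2);
static struct qstr dotdot_b = { .name = "..", .len = 2 };

Keeping qstr construction behind one macro is also what lets the nfs hunks collapse the separate .name.len / .name.name assignments into a single struct copy (.name = *name).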
diff --git a/fs/namei.c b/fs/namei.c index c42791914f82..f9e883c1b856 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -116,47 +116,37 @@ | |||
116 | * POSIX.1 2.4: an empty pathname is invalid (ENOENT). | 116 | * POSIX.1 2.4: an empty pathname is invalid (ENOENT). |
117 | * PATH_MAX includes the nul terminator --RR. | 117 | * PATH_MAX includes the nul terminator --RR. |
118 | */ | 118 | */ |
119 | static int do_getname(const char __user *filename, char *page) | ||
120 | { | ||
121 | int retval; | ||
122 | unsigned long len = PATH_MAX; | ||
123 | |||
124 | if (!segment_eq(get_fs(), KERNEL_DS)) { | ||
125 | if ((unsigned long) filename >= TASK_SIZE) | ||
126 | return -EFAULT; | ||
127 | if (TASK_SIZE - (unsigned long) filename < PATH_MAX) | ||
128 | len = TASK_SIZE - (unsigned long) filename; | ||
129 | } | ||
130 | |||
131 | retval = strncpy_from_user(page, filename, len); | ||
132 | if (retval > 0) { | ||
133 | if (retval < len) | ||
134 | return 0; | ||
135 | return -ENAMETOOLONG; | ||
136 | } else if (!retval) | ||
137 | retval = -ENOENT; | ||
138 | return retval; | ||
139 | } | ||
140 | |||
141 | static char *getname_flags(const char __user *filename, int flags, int *empty) | 119 | static char *getname_flags(const char __user *filename, int flags, int *empty) |
142 | { | 120 | { |
143 | char *result = __getname(); | 121 | char *result = __getname(), *err; |
144 | int retval; | 122 | int len; |
145 | 123 | ||
146 | if (!result) | 124 | if (unlikely(!result)) |
147 | return ERR_PTR(-ENOMEM); | 125 | return ERR_PTR(-ENOMEM); |
148 | 126 | ||
149 | retval = do_getname(filename, result); | 127 | len = strncpy_from_user(result, filename, PATH_MAX); |
150 | if (retval < 0) { | 128 | err = ERR_PTR(len); |
151 | if (retval == -ENOENT && empty) | 129 | if (unlikely(len < 0)) |
130 | goto error; | ||
131 | |||
132 | /* The empty path is special. */ | ||
133 | if (unlikely(!len)) { | ||
134 | if (empty) | ||
152 | *empty = 1; | 135 | *empty = 1; |
153 | if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) { | 136 | err = ERR_PTR(-ENOENT); |
154 | __putname(result); | 137 | if (!(flags & LOOKUP_EMPTY)) |
155 | return ERR_PTR(retval); | 138 | goto error; |
156 | } | 139 | } |
140 | |||
141 | err = ERR_PTR(-ENAMETOOLONG); | ||
142 | if (likely(len < PATH_MAX)) { | ||
143 | audit_getname(result); | ||
144 | return result; | ||
157 | } | 145 | } |
158 | audit_getname(result); | 146 | |
159 | return result; | 147 | error: |
148 | __putname(result); | ||
149 | return err; | ||
160 | } | 150 | } |
161 | 151 | ||
162 | char *getname(const char __user * filename) | 152 | char *getname(const char __user * filename) |
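The rewritten getname_flags() above leans entirely on the return convention of the hardened strncpy_from_user(): negative on fault, otherwise the number of bytes copied, never more than the limit it was given. A hedged sketch of the resulting decision table, pulled out as a stand-alone helper (demo name, not part of the patch):

#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/namei.h>

static int demo_classify_name(long len, unsigned int flags)
{
	if (len < 0)
		return len;			/* fault while copying from userspace */
	if (len == 0)				/* empty path: only valid with LOOKUP_EMPTY */
		return (flags & LOOKUP_EMPTY) ? 0 : -ENOENT;
	if (len >= PATH_MAX)
		return -ENAMETOOLONG;		/* the name did not fit in the buffer */
	return 0;				/* success: len bytes plus the NUL */
}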
@@ -1154,12 +1144,25 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, | |||
1154 | */ | 1144 | */ |
1155 | if (nd->flags & LOOKUP_RCU) { | 1145 | if (nd->flags & LOOKUP_RCU) { |
1156 | unsigned seq; | 1146 | unsigned seq; |
1157 | *inode = nd->inode; | 1147 | dentry = __d_lookup_rcu(parent, name, &seq, nd->inode); |
1158 | dentry = __d_lookup_rcu(parent, name, &seq, inode); | ||
1159 | if (!dentry) | 1148 | if (!dentry) |
1160 | goto unlazy; | 1149 | goto unlazy; |
1161 | 1150 | ||
1162 | /* Memory barrier in read_seqcount_begin of child is enough */ | 1151 | /* |
1152 | * This sequence count validates that the inode matches | ||
1153 | * the dentry name information from lookup. | ||
1154 | */ | ||
1155 | *inode = dentry->d_inode; | ||
1156 | if (read_seqcount_retry(&dentry->d_seq, seq)) | ||
1157 | return -ECHILD; | ||
1158 | |||
1159 | /* | ||
1160 | * This sequence count validates that the parent had no | ||
1161 | * changes while we did the lookup of the dentry above. | ||
1162 | * | ||
1163 | * The memory barrier in read_seqcount_begin of child is | ||
1164 | * enough, we can use __read_seqcount_retry here. | ||
1165 | */ | ||
1163 | if (__read_seqcount_retry(&parent->d_seq, nd->seq)) | 1166 | if (__read_seqcount_retry(&parent->d_seq, nd->seq)) |
1164 | return -ECHILD; | 1167 | return -ECHILD; |
1165 | nd->seq = seq; | 1168 | nd->seq = seq; |
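The new comments in do_lookup() describe the standard seqcount read-side discipline: sample the counter, read the protected fields, then retry if the counter moved. In generic form the pattern looks roughly like this (illustrative only; the dcache code uses the __-prefixed retry on the parent because the barrier from the child's read_seqcount_begin already suffices):

#include <linux/seqlock.h>

static seqcount_t demo_seq;		/* seqcount_init(&demo_seq) at setup time */
static int demo_protected_value;

static int demo_read_protected(void)
{
	unsigned int seq;
	int val;

	do {
		seq = read_seqcount_begin(&demo_seq);
		val = demo_protected_value;	/* read the fields guarded by demo_seq */
	} while (read_seqcount_retry(&demo_seq, seq));

	return val;
}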
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8789210c6905..eedd24d0ad2e 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -477,10 +477,7 @@ different: | |||
477 | static | 477 | static |
478 | void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) | 478 | void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) |
479 | { | 479 | { |
480 | struct qstr filename = { | 480 | struct qstr filename = QSTR_INIT(entry->name, entry->len); |
481 | .len = entry->len, | ||
482 | .name = entry->name, | ||
483 | }; | ||
484 | struct dentry *dentry; | 481 | struct dentry *dentry; |
485 | struct dentry *alias; | 482 | struct dentry *alias; |
486 | struct inode *dir = parent->d_inode; | 483 | struct inode *dir = parent->d_inode; |
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 5242eae6711a..75c68299358e 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
@@ -398,8 +398,7 @@ nfs3_proc_remove(struct inode *dir, struct qstr *name) | |||
398 | { | 398 | { |
399 | struct nfs_removeargs arg = { | 399 | struct nfs_removeargs arg = { |
400 | .fh = NFS_FH(dir), | 400 | .fh = NFS_FH(dir), |
401 | .name.len = name->len, | 401 | .name = *name, |
402 | .name.name = name->name, | ||
403 | }; | 402 | }; |
404 | struct nfs_removeres res; | 403 | struct nfs_removeres res; |
405 | struct rpc_message msg = { | 404 | struct rpc_message msg = { |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 99650aaf8937..ab985f6f0da8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2782,8 +2782,7 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) | |||
2782 | struct nfs_server *server = NFS_SERVER(dir); | 2782 | struct nfs_server *server = NFS_SERVER(dir); |
2783 | struct nfs_removeargs args = { | 2783 | struct nfs_removeargs args = { |
2784 | .fh = NFS_FH(dir), | 2784 | .fh = NFS_FH(dir), |
2785 | .name.len = name->len, | 2785 | .name = *name, |
2786 | .name.name = name->name, | ||
2787 | .bitmask = server->attr_bitmask, | 2786 | .bitmask = server->attr_bitmask, |
2788 | }; | 2787 | }; |
2789 | struct nfs_removeres res = { | 2788 | struct nfs_removeres res = { |
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index b63b6f4d14fb..d6408b6437de 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -335,8 +335,7 @@ nfs_proc_remove(struct inode *dir, struct qstr *name) | |||
335 | { | 335 | { |
336 | struct nfs_removeargs arg = { | 336 | struct nfs_removeargs arg = { |
337 | .fh = NFS_FH(dir), | 337 | .fh = NFS_FH(dir), |
338 | .name.len = name->len, | 338 | .name = *name, |
339 | .name.name = name->name, | ||
340 | }; | 339 | }; |
341 | struct rpc_message msg = { | 340 | struct rpc_message msg = { |
342 | .rpc_proc = &nfs_procedures[NFSPROC_REMOVE], | 341 | .rpc_proc = &nfs_procedures[NFSPROC_REMOVE], |
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index fce2bbee66d4..0bb2c2010b95 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c | |||
@@ -441,7 +441,7 @@ static struct dentry *nilfs_get_parent(struct dentry *child) | |||
441 | { | 441 | { |
442 | unsigned long ino; | 442 | unsigned long ino; |
443 | struct inode *inode; | 443 | struct inode *inode; |
444 | struct qstr dotdot = {.name = "..", .len = 2}; | 444 | struct qstr dotdot = QSTR_INIT("..", 2); |
445 | struct nilfs_root *root; | 445 | struct nilfs_root *root; |
446 | 446 | ||
447 | ino = nilfs_inode_by_name(child->d_inode, &dotdot); | 447 | ino = nilfs_inode_by_name(child->d_inode, &dotdot); |
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 044e7b58d31c..1bfe8802cc1e 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -2005,7 +2005,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) | |||
2005 | o2net_listen_sock = sock; | 2005 | o2net_listen_sock = sock; |
2006 | INIT_WORK(&o2net_listen_work, o2net_accept_many); | 2006 | INIT_WORK(&o2net_listen_work, o2net_accept_many); |
2007 | 2007 | ||
2008 | sock->sk->sk_reuse = 1; | 2008 | sock->sk->sk_reuse = SK_CAN_REUSE; |
2009 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); | 2009 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); |
2010 | if (ret < 0) { | 2010 | if (ret < 0) { |
2011 | printk(KERN_ERR "o2net: Error %d while binding socket at " | 2011 | printk(KERN_ERR "o2net: Error %d while binding socket at " |
@@ -681,7 +681,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, | |||
681 | 681 | ||
682 | f->f_op = fops_get(inode->i_fop); | 682 | f->f_op = fops_get(inode->i_fop); |
683 | 683 | ||
684 | error = security_dentry_open(f, cred); | 684 | error = security_file_open(f, cred); |
685 | if (error) | 685 | if (error) |
686 | goto cleanup_all; | 686 | goto cleanup_all; |
687 | 687 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index 1c8b280146d7..57b8159f26f3 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -1799,10 +1799,15 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
1799 | if (task) { | 1799 | if (task) { |
1800 | files = get_files_struct(task); | 1800 | files = get_files_struct(task); |
1801 | if (files) { | 1801 | if (files) { |
1802 | struct file *file; | ||
1802 | rcu_read_lock(); | 1803 | rcu_read_lock(); |
1803 | if (fcheck_files(files, fd)) { | 1804 | file = fcheck_files(files, fd); |
1805 | if (file) { | ||
1806 | unsigned i_mode, f_mode = file->f_mode; | ||
1807 | |||
1804 | rcu_read_unlock(); | 1808 | rcu_read_unlock(); |
1805 | put_files_struct(files); | 1809 | put_files_struct(files); |
1810 | |||
1806 | if (task_dumpable(task)) { | 1811 | if (task_dumpable(task)) { |
1807 | rcu_read_lock(); | 1812 | rcu_read_lock(); |
1808 | cred = __task_cred(task); | 1813 | cred = __task_cred(task); |
@@ -1813,7 +1818,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
1813 | inode->i_uid = 0; | 1818 | inode->i_uid = 0; |
1814 | inode->i_gid = 0; | 1819 | inode->i_gid = 0; |
1815 | } | 1820 | } |
1816 | inode->i_mode &= ~(S_ISUID | S_ISGID); | 1821 | |
1822 | i_mode = S_IFLNK; | ||
1823 | if (f_mode & FMODE_READ) | ||
1824 | i_mode |= S_IRUSR | S_IXUSR; | ||
1825 | if (f_mode & FMODE_WRITE) | ||
1826 | i_mode |= S_IWUSR | S_IXUSR; | ||
1827 | inode->i_mode = i_mode; | ||
1828 | |||
1817 | security_task_to_inode(task, inode); | 1829 | security_task_to_inode(task, inode); |
1818 | put_task_struct(task); | 1830 | put_task_struct(task); |
1819 | return 1; | 1831 | return 1; |
@@ -1837,8 +1849,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, | |||
1837 | struct dentry *dentry, struct task_struct *task, const void *ptr) | 1849 | struct dentry *dentry, struct task_struct *task, const void *ptr) |
1838 | { | 1850 | { |
1839 | unsigned fd = *(const unsigned *)ptr; | 1851 | unsigned fd = *(const unsigned *)ptr; |
1840 | struct file *file; | ||
1841 | struct files_struct *files; | ||
1842 | struct inode *inode; | 1852 | struct inode *inode; |
1843 | struct proc_inode *ei; | 1853 | struct proc_inode *ei; |
1844 | struct dentry *error = ERR_PTR(-ENOENT); | 1854 | struct dentry *error = ERR_PTR(-ENOENT); |
@@ -1848,25 +1858,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, | |||
1848 | goto out; | 1858 | goto out; |
1849 | ei = PROC_I(inode); | 1859 | ei = PROC_I(inode); |
1850 | ei->fd = fd; | 1860 | ei->fd = fd; |
1851 | files = get_files_struct(task); | ||
1852 | if (!files) | ||
1853 | goto out_iput; | ||
1854 | inode->i_mode = S_IFLNK; | ||
1855 | |||
1856 | /* | ||
1857 | * We are not taking a ref to the file structure, so we must | ||
1858 | * hold ->file_lock. | ||
1859 | */ | ||
1860 | spin_lock(&files->file_lock); | ||
1861 | file = fcheck_files(files, fd); | ||
1862 | if (!file) | ||
1863 | goto out_unlock; | ||
1864 | if (file->f_mode & FMODE_READ) | ||
1865 | inode->i_mode |= S_IRUSR | S_IXUSR; | ||
1866 | if (file->f_mode & FMODE_WRITE) | ||
1867 | inode->i_mode |= S_IWUSR | S_IXUSR; | ||
1868 | spin_unlock(&files->file_lock); | ||
1869 | put_files_struct(files); | ||
1870 | 1861 | ||
1871 | inode->i_op = &proc_pid_link_inode_operations; | 1862 | inode->i_op = &proc_pid_link_inode_operations; |
1872 | inode->i_size = 64; | 1863 | inode->i_size = 64; |
@@ -1879,12 +1870,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, | |||
1879 | 1870 | ||
1880 | out: | 1871 | out: |
1881 | return error; | 1872 | return error; |
1882 | out_unlock: | ||
1883 | spin_unlock(&files->file_lock); | ||
1884 | put_files_struct(files); | ||
1885 | out_iput: | ||
1886 | iput(inode); | ||
1887 | goto out; | ||
1888 | } | 1873 | } |
1889 | 1874 | ||
1890 | static struct dentry *proc_lookupfd_common(struct inode *dir, | 1875 | static struct dentry *proc_lookupfd_common(struct inode *dir, |
@@ -2177,16 +2162,16 @@ static struct dentry *proc_map_files_lookup(struct inode *dir, | |||
2177 | goto out; | 2162 | goto out; |
2178 | 2163 | ||
2179 | result = ERR_PTR(-EACCES); | 2164 | result = ERR_PTR(-EACCES); |
2180 | if (lock_trace(task)) | 2165 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) |
2181 | goto out_put_task; | 2166 | goto out_put_task; |
2182 | 2167 | ||
2183 | result = ERR_PTR(-ENOENT); | 2168 | result = ERR_PTR(-ENOENT); |
2184 | if (dname_to_vma_addr(dentry, &vm_start, &vm_end)) | 2169 | if (dname_to_vma_addr(dentry, &vm_start, &vm_end)) |
2185 | goto out_unlock; | 2170 | goto out_put_task; |
2186 | 2171 | ||
2187 | mm = get_task_mm(task); | 2172 | mm = get_task_mm(task); |
2188 | if (!mm) | 2173 | if (!mm) |
2189 | goto out_unlock; | 2174 | goto out_put_task; |
2190 | 2175 | ||
2191 | down_read(&mm->mmap_sem); | 2176 | down_read(&mm->mmap_sem); |
2192 | vma = find_exact_vma(mm, vm_start, vm_end); | 2177 | vma = find_exact_vma(mm, vm_start, vm_end); |
@@ -2198,8 +2183,6 @@ static struct dentry *proc_map_files_lookup(struct inode *dir, | |||
2198 | out_no_vma: | 2183 | out_no_vma: |
2199 | up_read(&mm->mmap_sem); | 2184 | up_read(&mm->mmap_sem); |
2200 | mmput(mm); | 2185 | mmput(mm); |
2201 | out_unlock: | ||
2202 | unlock_trace(task); | ||
2203 | out_put_task: | 2186 | out_put_task: |
2204 | put_task_struct(task); | 2187 | put_task_struct(task); |
2205 | out: | 2188 | out: |
@@ -2233,7 +2216,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
2233 | goto out; | 2216 | goto out; |
2234 | 2217 | ||
2235 | ret = -EACCES; | 2218 | ret = -EACCES; |
2236 | if (lock_trace(task)) | 2219 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) |
2237 | goto out_put_task; | 2220 | goto out_put_task; |
2238 | 2221 | ||
2239 | ret = 0; | 2222 | ret = 0; |
@@ -2241,12 +2224,12 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
2241 | case 0: | 2224 | case 0: |
2242 | ino = inode->i_ino; | 2225 | ino = inode->i_ino; |
2243 | if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0) | 2226 | if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0) |
2244 | goto out_unlock; | 2227 | goto out_put_task; |
2245 | filp->f_pos++; | 2228 | filp->f_pos++; |
2246 | case 1: | 2229 | case 1: |
2247 | ino = parent_ino(dentry); | 2230 | ino = parent_ino(dentry); |
2248 | if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) | 2231 | if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) |
2249 | goto out_unlock; | 2232 | goto out_put_task; |
2250 | filp->f_pos++; | 2233 | filp->f_pos++; |
2251 | default: | 2234 | default: |
2252 | { | 2235 | { |
@@ -2257,7 +2240,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
2257 | 2240 | ||
2258 | mm = get_task_mm(task); | 2241 | mm = get_task_mm(task); |
2259 | if (!mm) | 2242 | if (!mm) |
2260 | goto out_unlock; | 2243 | goto out_put_task; |
2261 | down_read(&mm->mmap_sem); | 2244 | down_read(&mm->mmap_sem); |
2262 | 2245 | ||
2263 | nr_files = 0; | 2246 | nr_files = 0; |
@@ -2287,7 +2270,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
2287 | flex_array_free(fa); | 2270 | flex_array_free(fa); |
2288 | up_read(&mm->mmap_sem); | 2271 | up_read(&mm->mmap_sem); |
2289 | mmput(mm); | 2272 | mmput(mm); |
2290 | goto out_unlock; | 2273 | goto out_put_task; |
2291 | } | 2274 | } |
2292 | for (i = 0, vma = mm->mmap, pos = 2; vma; | 2275 | for (i = 0, vma = mm->mmap, pos = 2; vma; |
2293 | vma = vma->vm_next) { | 2276 | vma = vma->vm_next) { |
@@ -2332,8 +2315,6 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
2332 | } | 2315 | } |
2333 | } | 2316 | } |
2334 | 2317 | ||
2335 | out_unlock: | ||
2336 | unlock_trace(task); | ||
2337 | out_put_task: | 2318 | out_put_task: |
2338 | put_task_struct(task); | 2319 | put_task_struct(task); |
2339 | out: | 2320 | out: |
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig index 8007ae7c0d8c..23ade2680a4a 100644 --- a/fs/pstore/Kconfig +++ b/fs/pstore/Kconfig | |||
@@ -11,3 +11,20 @@ config PSTORE | |||
11 | (e.g. ACPI_APEI on X86) which will select this for you. | 11 | (e.g. ACPI_APEI on X86) which will select this for you. |
12 | If you don't have a platform persistent store driver, | 12 | If you don't have a platform persistent store driver, |
13 | say N. | 13 | say N. |
14 | |||
15 | config PSTORE_RAM | ||
16 | tristate "Log panic/oops to a RAM buffer" | ||
17 | depends on PSTORE | ||
18 | depends on HAS_IOMEM | ||
19 | depends on HAVE_MEMBLOCK | ||
20 | select REED_SOLOMON | ||
21 | select REED_SOLOMON_ENC8 | ||
22 | select REED_SOLOMON_DEC8 | ||
23 | help | ||
24 | This enables panic and oops messages to be logged to a circular | ||
25 | buffer in RAM where they can be read back at some later point. | ||
26 | |||
27 | Note that for historical reasons, the module will be named | ||
28 | "ramoops.ko". | ||
29 | |||
30 | For more information, see Documentation/ramoops.txt. | ||
diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile index 760f4bce7d1d..278a44e0d4e1 100644 --- a/fs/pstore/Makefile +++ b/fs/pstore/Makefile | |||
@@ -5,3 +5,6 @@ | |||
5 | obj-y += pstore.o | 5 | obj-y += pstore.o |
6 | 6 | ||
7 | pstore-objs += inode.o platform.o | 7 | pstore-objs += inode.o platform.o |
8 | |||
9 | ramoops-objs += ram.o ram_core.o | ||
10 | obj-$(CONFIG_PSTORE_RAM) += ramoops.o | ||
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c new file mode 100644 index 000000000000..9123cce28c1e --- /dev/null +++ b/fs/pstore/ram.c | |||
@@ -0,0 +1,383 @@ | |||
1 | /* | ||
2 | * RAM Oops/Panic logger | ||
3 | * | ||
4 | * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com> | ||
5 | * Copyright (C) 2011 Kees Cook <keescook@chromium.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
19 | * 02110-1301 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/pstore.h> | ||
29 | #include <linux/time.h> | ||
30 | #include <linux/io.h> | ||
31 | #include <linux/ioport.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/pstore_ram.h> | ||
35 | |||
36 | #define RAMOOPS_KERNMSG_HDR "====" | ||
37 | #define MIN_MEM_SIZE 4096UL | ||
38 | |||
39 | static ulong record_size = MIN_MEM_SIZE; | ||
40 | module_param(record_size, ulong, 0400); | ||
41 | MODULE_PARM_DESC(record_size, | ||
42 | "size of each dump done on oops/panic"); | ||
43 | |||
44 | static ulong mem_address; | ||
45 | module_param(mem_address, ulong, 0400); | ||
46 | MODULE_PARM_DESC(mem_address, | ||
47 | "start of reserved RAM used to store oops/panic logs"); | ||
48 | |||
49 | static ulong mem_size; | ||
50 | module_param(mem_size, ulong, 0400); | ||
51 | MODULE_PARM_DESC(mem_size, | ||
52 | "size of reserved RAM used to store oops/panic logs"); | ||
53 | |||
54 | static int dump_oops = 1; | ||
55 | module_param(dump_oops, int, 0600); | ||
56 | MODULE_PARM_DESC(dump_oops, | ||
57 | "set to 1 to dump oopses, 0 to only dump panics (default 1)"); | ||
58 | |||
59 | static int ramoops_ecc; | ||
60 | module_param_named(ecc, ramoops_ecc, int, 0600); | ||
61 | MODULE_PARM_DESC(ramoops_ecc, | ||
62 | "set to 1 to enable ECC support"); | ||
63 | |||
64 | struct ramoops_context { | ||
65 | struct persistent_ram_zone **przs; | ||
66 | phys_addr_t phys_addr; | ||
67 | unsigned long size; | ||
68 | size_t record_size; | ||
69 | int dump_oops; | ||
70 | bool ecc; | ||
71 | unsigned int count; | ||
72 | unsigned int max_count; | ||
73 | unsigned int read_count; | ||
74 | struct pstore_info pstore; | ||
75 | }; | ||
76 | |||
77 | static struct platform_device *dummy; | ||
78 | static struct ramoops_platform_data *dummy_data; | ||
79 | |||
80 | static int ramoops_pstore_open(struct pstore_info *psi) | ||
81 | { | ||
82 | struct ramoops_context *cxt = psi->data; | ||
83 | |||
84 | cxt->read_count = 0; | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type, | ||
89 | struct timespec *time, | ||
90 | char **buf, | ||
91 | struct pstore_info *psi) | ||
92 | { | ||
93 | ssize_t size; | ||
94 | struct ramoops_context *cxt = psi->data; | ||
95 | struct persistent_ram_zone *prz; | ||
96 | |||
97 | if (cxt->read_count >= cxt->max_count) | ||
98 | return -EINVAL; | ||
99 | |||
100 | *id = cxt->read_count++; | ||
101 | prz = cxt->przs[*id]; | ||
102 | |||
103 | /* Only supports dmesg output so far. */ | ||
104 | *type = PSTORE_TYPE_DMESG; | ||
105 | /* TODO(kees): Bogus time for the moment. */ | ||
106 | time->tv_sec = 0; | ||
107 | time->tv_nsec = 0; | ||
108 | |||
109 | size = persistent_ram_old_size(prz); | ||
110 | *buf = kmalloc(size, GFP_KERNEL); | ||
111 | if (*buf == NULL) | ||
112 | return -ENOMEM; | ||
113 | memcpy(*buf, persistent_ram_old(prz), size); | ||
114 | |||
115 | return size; | ||
116 | } | ||
117 | |||
118 | static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz) | ||
119 | { | ||
120 | char *hdr; | ||
121 | struct timeval timestamp; | ||
122 | size_t len; | ||
123 | |||
124 | do_gettimeofday(×tamp); | ||
125 | hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu\n", | ||
126 | (long)timestamp.tv_sec, (long)timestamp.tv_usec); | ||
127 | WARN_ON_ONCE(!hdr); | ||
128 | len = hdr ? strlen(hdr) : 0; | ||
129 | persistent_ram_write(prz, hdr, len); | ||
130 | kfree(hdr); | ||
131 | |||
132 | return len; | ||
133 | } | ||
134 | |||
135 | static int ramoops_pstore_write(enum pstore_type_id type, | ||
136 | enum kmsg_dump_reason reason, | ||
137 | u64 *id, | ||
138 | unsigned int part, | ||
139 | size_t size, struct pstore_info *psi) | ||
140 | { | ||
141 | struct ramoops_context *cxt = psi->data; | ||
142 | struct persistent_ram_zone *prz = cxt->przs[cxt->count]; | ||
143 | size_t hlen; | ||
144 | |||
145 | /* Currently ramoops is designed to only store dmesg dumps. */ | ||
146 | if (type != PSTORE_TYPE_DMESG) | ||
147 | return -EINVAL; | ||
148 | |||
149 | /* Out of the various dmesg dump types, ramoops is currently designed | ||
150 | * to only store crash logs, rather than storing general kernel logs. | ||
151 | */ | ||
152 | if (reason != KMSG_DUMP_OOPS && | ||
153 | reason != KMSG_DUMP_PANIC) | ||
154 | return -EINVAL; | ||
155 | |||
156 | /* Skip Oopses when configured to do so. */ | ||
157 | if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops) | ||
158 | return -EINVAL; | ||
159 | |||
160 | /* Explicitly only take the first part of any new crash. | ||
161 | * If our buffer is larger than kmsg_bytes, this can never happen, | ||
162 | * and if our buffer is smaller than kmsg_bytes, we don't want the | ||
163 | * report split across multiple records. | ||
164 | */ | ||
165 | if (part != 1) | ||
166 | return -ENOSPC; | ||
167 | |||
168 | hlen = ramoops_write_kmsg_hdr(prz); | ||
169 | if (size + hlen > prz->buffer_size) | ||
170 | size = prz->buffer_size - hlen; | ||
171 | persistent_ram_write(prz, cxt->pstore.buf, size); | ||
172 | |||
173 | cxt->count = (cxt->count + 1) % cxt->max_count; | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, | ||
179 | struct pstore_info *psi) | ||
180 | { | ||
181 | struct ramoops_context *cxt = psi->data; | ||
182 | |||
183 | if (id >= cxt->max_count) | ||
184 | return -EINVAL; | ||
185 | |||
186 | persistent_ram_free_old(cxt->przs[id]); | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static struct ramoops_context oops_cxt = { | ||
192 | .pstore = { | ||
193 | .owner = THIS_MODULE, | ||
194 | .name = "ramoops", | ||
195 | .open = ramoops_pstore_open, | ||
196 | .read = ramoops_pstore_read, | ||
197 | .write = ramoops_pstore_write, | ||
198 | .erase = ramoops_pstore_erase, | ||
199 | }, | ||
200 | }; | ||
201 | |||
202 | static int __init ramoops_probe(struct platform_device *pdev) | ||
203 | { | ||
204 | struct device *dev = &pdev->dev; | ||
205 | struct ramoops_platform_data *pdata = pdev->dev.platform_data; | ||
206 | struct ramoops_context *cxt = &oops_cxt; | ||
207 | int err = -EINVAL; | ||
208 | int i; | ||
209 | |||
210 | /* Only a single ramoops area is allowed at a time, so fail extra | ||
211 | * probes. | ||
212 | */ | ||
213 | if (cxt->max_count) | ||
214 | goto fail_out; | ||
215 | |||
216 | if (!pdata->mem_size || !pdata->record_size) { | ||
217 | pr_err("The memory size and the record size must be " | ||
218 | "non-zero\n"); | ||
219 | goto fail_out; | ||
220 | } | ||
221 | |||
222 | pdata->mem_size = rounddown_pow_of_two(pdata->mem_size); | ||
223 | pdata->record_size = rounddown_pow_of_two(pdata->record_size); | ||
224 | |||
225 | /* Check for the minimum memory size */ | ||
226 | if (pdata->mem_size < MIN_MEM_SIZE && | ||
227 | pdata->record_size < MIN_MEM_SIZE) { | ||
228 | pr_err("memory size too small, minimum is %lu\n", | ||
229 | MIN_MEM_SIZE); | ||
230 | goto fail_out; | ||
231 | } | ||
232 | |||
233 | if (pdata->mem_size < pdata->record_size) { | ||
234 | pr_err("The memory size must be larger than the " | ||
235 | "record size\n"); | ||
236 | goto fail_out; | ||
237 | } | ||
238 | |||
239 | cxt->max_count = pdata->mem_size / pdata->record_size; | ||
240 | cxt->count = 0; | ||
241 | cxt->size = pdata->mem_size; | ||
242 | cxt->phys_addr = pdata->mem_address; | ||
243 | cxt->record_size = pdata->record_size; | ||
244 | cxt->dump_oops = pdata->dump_oops; | ||
245 | cxt->ecc = pdata->ecc; | ||
246 | |||
247 | cxt->przs = kzalloc(sizeof(*cxt->przs) * cxt->max_count, GFP_KERNEL); | ||
248 | if (!cxt->przs) { | ||
249 | err = -ENOMEM; | ||
250 | dev_err(dev, "failed to initialize a prz array\n"); | ||
251 | goto fail_out; | ||
252 | } | ||
253 | |||
254 | for (i = 0; i < cxt->max_count; i++) { | ||
255 | size_t sz = cxt->record_size; | ||
256 | phys_addr_t start = cxt->phys_addr + sz * i; | ||
257 | |||
258 | cxt->przs[i] = persistent_ram_new(start, sz, cxt->ecc); | ||
259 | if (IS_ERR(cxt->przs[i])) { | ||
260 | err = PTR_ERR(cxt->przs[i]); | ||
261 | dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", | ||
262 | sz, (unsigned long long)start, err); | ||
263 | goto fail_przs; | ||
264 | } | ||
265 | } | ||
266 | |||
267 | cxt->pstore.data = cxt; | ||
268 | cxt->pstore.bufsize = cxt->przs[0]->buffer_size; | ||
269 | cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL); | ||
270 | spin_lock_init(&cxt->pstore.buf_lock); | ||
271 | if (!cxt->pstore.buf) { | ||
272 | pr_err("cannot allocate pstore buffer\n"); | ||
273 | goto fail_clear; | ||
274 | } | ||
275 | |||
276 | err = pstore_register(&cxt->pstore); | ||
277 | if (err) { | ||
278 | pr_err("registering with pstore failed\n"); | ||
279 | goto fail_buf; | ||
280 | } | ||
281 | |||
282 | /* | ||
283 | * Update the module parameter variables as well so they are visible | ||
284 | * through /sys/module/ramoops/parameters/ | ||
285 | */ | ||
286 | mem_size = pdata->mem_size; | ||
287 | mem_address = pdata->mem_address; | ||
288 | record_size = pdata->record_size; | ||
289 | dump_oops = pdata->dump_oops; | ||
290 | |||
291 | pr_info("attached 0x%lx@0x%llx (%ux0x%zx), ecc: %s\n", | ||
292 | cxt->size, (unsigned long long)cxt->phys_addr, | ||
293 | cxt->max_count, cxt->record_size, | ||
294 | ramoops_ecc ? "on" : "off"); | ||
295 | |||
296 | return 0; | ||
297 | |||
298 | fail_buf: | ||
299 | kfree(cxt->pstore.buf); | ||
300 | fail_clear: | ||
301 | cxt->pstore.bufsize = 0; | ||
302 | cxt->max_count = 0; | ||
303 | fail_przs: | ||
304 | for (i = 0; cxt->przs[i]; i++) | ||
305 | persistent_ram_free(cxt->przs[i]); | ||
306 | kfree(cxt->przs); | ||
307 | fail_out: | ||
308 | return err; | ||
309 | } | ||
310 | |||
311 | static int __exit ramoops_remove(struct platform_device *pdev) | ||
312 | { | ||
313 | #if 0 | ||
314 | /* TODO(kees): We cannot unload ramoops since pstore doesn't support | ||
315 | * unregistering yet. | ||
316 | */ | ||
317 | struct ramoops_context *cxt = &oops_cxt; | ||
318 | |||
319 | iounmap(cxt->virt_addr); | ||
320 | release_mem_region(cxt->phys_addr, cxt->size); | ||
321 | cxt->max_count = 0; | ||
322 | |||
323 | /* TODO(kees): When pstore supports unregistering, call it here. */ | ||
324 | kfree(cxt->pstore.buf); | ||
325 | cxt->pstore.bufsize = 0; | ||
326 | |||
327 | return 0; | ||
328 | #endif | ||
329 | return -EBUSY; | ||
330 | } | ||
331 | |||
332 | static struct platform_driver ramoops_driver = { | ||
333 | .remove = __exit_p(ramoops_remove), | ||
334 | .driver = { | ||
335 | .name = "ramoops", | ||
336 | .owner = THIS_MODULE, | ||
337 | }, | ||
338 | }; | ||
339 | |||
340 | static int __init ramoops_init(void) | ||
341 | { | ||
342 | int ret; | ||
343 | ret = platform_driver_probe(&ramoops_driver, ramoops_probe); | ||
344 | if (ret == -ENODEV) { | ||
345 | /* | ||
346 | * If we didn't find a platform device, we use module parameters | ||
347 | * building platform data on the fly. | ||
348 | */ | ||
349 | pr_info("platform device not found, using module parameters\n"); | ||
350 | dummy_data = kzalloc(sizeof(struct ramoops_platform_data), | ||
351 | GFP_KERNEL); | ||
352 | if (!dummy_data) | ||
353 | return -ENOMEM; | ||
354 | dummy_data->mem_size = mem_size; | ||
355 | dummy_data->mem_address = mem_address; | ||
356 | dummy_data->record_size = record_size; | ||
357 | dummy_data->dump_oops = dump_oops; | ||
358 | dummy_data->ecc = ramoops_ecc; | ||
359 | dummy = platform_create_bundle(&ramoops_driver, ramoops_probe, | ||
360 | NULL, 0, dummy_data, | ||
361 | sizeof(struct ramoops_platform_data)); | ||
362 | |||
363 | if (IS_ERR(dummy)) | ||
364 | ret = PTR_ERR(dummy); | ||
365 | else | ||
366 | ret = 0; | ||
367 | } | ||
368 | |||
369 | return ret; | ||
370 | } | ||
371 | |||
372 | static void __exit ramoops_exit(void) | ||
373 | { | ||
374 | platform_driver_unregister(&ramoops_driver); | ||
375 | kfree(dummy_data); | ||
376 | } | ||
377 | |||
378 | module_init(ramoops_init); | ||
379 | module_exit(ramoops_exit); | ||
380 | |||
381 | MODULE_LICENSE("GPL"); | ||
382 | MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>"); | ||
383 | MODULE_DESCRIPTION("RAM Oops/Panic logger/driver"); | ||
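For context, ram.c takes its configuration either from the module parameters declared above or from platform data handed to the "ramoops" platform device. A hedged board-file sketch of the platform-data path, using the ramoops_platform_data fields referenced in ramoops_probe(); the address and sizes are placeholders, not values from the patch:

#include <linux/platform_device.h>
#include <linux/pstore_ram.h>

static struct ramoops_platform_data board_ramoops_data = {
	.mem_address	= 0x8f000000,	/* placeholder: start of reserved RAM */
	.mem_size	= 0x100000,	/* placeholder: 1 MiB reserved region */
	.record_size	= 0x10000,	/* 64 KiB per dump slot -> 16 slots */
	.dump_oops	= 1,		/* log oopses as well as panics */
	.ecc		= 1,		/* enable Reed-Solomon protection */
};

static struct platform_device board_ramoops_dev = {
	.name	= "ramoops",
	.id	= -1,
	.dev	= {
		.platform_data = &board_ramoops_data,
	},
};

/* registered from board init code, e.g. platform_device_register(&board_ramoops_dev); */

When no such device exists, ramoops_init() falls back to building the same structure from the module parameters, which is why mem_address, mem_size, record_size, dump_oops and ecc are all exported as parameters above.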
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c new file mode 100644 index 000000000000..31f8d184f3a0 --- /dev/null +++ b/fs/pstore/ram_core.c | |||
@@ -0,0 +1,532 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Google, Inc. | ||
3 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | ||
5 | * License version 2, as published by the Free Software Foundation, and | ||
6 | * may be copied, distributed, and modified under those terms. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/memblock.h> | ||
23 | #include <linux/rslib.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/vmalloc.h> | ||
26 | #include <linux/pstore_ram.h> | ||
27 | #include <asm/page.h> | ||
28 | |||
29 | struct persistent_ram_buffer { | ||
30 | uint32_t sig; | ||
31 | atomic_t start; | ||
32 | atomic_t size; | ||
33 | uint8_t data[0]; | ||
34 | }; | ||
35 | |||
36 | #define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */ | ||
37 | |||
38 | static __initdata LIST_HEAD(persistent_ram_list); | ||
39 | |||
40 | static inline size_t buffer_size(struct persistent_ram_zone *prz) | ||
41 | { | ||
42 | return atomic_read(&prz->buffer->size); | ||
43 | } | ||
44 | |||
45 | static inline size_t buffer_start(struct persistent_ram_zone *prz) | ||
46 | { | ||
47 | return atomic_read(&prz->buffer->start); | ||
48 | } | ||
49 | |||
50 | /* increase and wrap the start pointer, returning the old value */ | ||
51 | static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) | ||
52 | { | ||
53 | int old; | ||
54 | int new; | ||
55 | |||
56 | do { | ||
57 | old = atomic_read(&prz->buffer->start); | ||
58 | new = old + a; | ||
59 | while (unlikely(new > prz->buffer_size)) | ||
60 | new -= prz->buffer_size; | ||
61 | } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old); | ||
62 | |||
63 | return old; | ||
64 | } | ||
65 | |||
66 | /* increase the size counter until it hits the max size */ | ||
67 | static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a) | ||
68 | { | ||
69 | size_t old; | ||
70 | size_t new; | ||
71 | |||
72 | if (atomic_read(&prz->buffer->size) == prz->buffer_size) | ||
73 | return; | ||
74 | |||
75 | do { | ||
76 | old = atomic_read(&prz->buffer->size); | ||
77 | new = old + a; | ||
78 | if (new > prz->buffer_size) | ||
79 | new = prz->buffer_size; | ||
80 | } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old); | ||
81 | } | ||
82 | |||
83 | static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, | ||
84 | uint8_t *data, size_t len, uint8_t *ecc) | ||
85 | { | ||
86 | int i; | ||
87 | uint16_t par[prz->ecc_size]; | ||
88 | |||
89 | /* Initialize the parity buffer */ | ||
90 | memset(par, 0, sizeof(par)); | ||
91 | encode_rs8(prz->rs_decoder, data, len, par, 0); | ||
92 | for (i = 0; i < prz->ecc_size; i++) | ||
93 | ecc[i] = par[i]; | ||
94 | } | ||
95 | |||
96 | static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz, | ||
97 | void *data, size_t len, uint8_t *ecc) | ||
98 | { | ||
99 | int i; | ||
100 | uint16_t par[prz->ecc_size]; | ||
101 | |||
102 | for (i = 0; i < prz->ecc_size; i++) | ||
103 | par[i] = ecc[i]; | ||
104 | return decode_rs8(prz->rs_decoder, data, par, len, | ||
105 | NULL, 0, NULL, 0, NULL); | ||
106 | } | ||
107 | |||
108 | static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz, | ||
109 | unsigned int start, unsigned int count) | ||
110 | { | ||
111 | struct persistent_ram_buffer *buffer = prz->buffer; | ||
112 | uint8_t *buffer_end = buffer->data + prz->buffer_size; | ||
113 | uint8_t *block; | ||
114 | uint8_t *par; | ||
115 | int ecc_block_size = prz->ecc_block_size; | ||
116 | int ecc_size = prz->ecc_size; | ||
117 | int size = prz->ecc_block_size; | ||
118 | |||
119 | if (!prz->ecc) | ||
120 | return; | ||
121 | |||
122 | block = buffer->data + (start & ~(ecc_block_size - 1)); | ||
123 | par = prz->par_buffer + (start / ecc_block_size) * prz->ecc_size; | ||
124 | |||
125 | do { | ||
126 | if (block + ecc_block_size > buffer_end) | ||
127 | size = buffer_end - block; | ||
128 | persistent_ram_encode_rs8(prz, block, size, par); | ||
129 | block += ecc_block_size; | ||
130 | par += ecc_size; | ||
131 | } while (block < buffer->data + start + count); | ||
132 | } | ||
133 | |||
134 | static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz) | ||
135 | { | ||
136 | struct persistent_ram_buffer *buffer = prz->buffer; | ||
137 | |||
138 | if (!prz->ecc) | ||
139 | return; | ||
140 | |||
141 | persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer), | ||
142 | prz->par_header); | ||
143 | } | ||
144 | |||
145 | static void persistent_ram_ecc_old(struct persistent_ram_zone *prz) | ||
146 | { | ||
147 | struct persistent_ram_buffer *buffer = prz->buffer; | ||
148 | uint8_t *block; | ||
149 | uint8_t *par; | ||
150 | |||
151 | if (!prz->ecc) | ||
152 | return; | ||
153 | |||
154 | block = buffer->data; | ||
155 | par = prz->par_buffer; | ||
156 | while (block < buffer->data + buffer_size(prz)) { | ||
157 | int numerr; | ||
158 | int size = prz->ecc_block_size; | ||
159 | if (block + size > buffer->data + prz->buffer_size) | ||
160 | size = buffer->data + prz->buffer_size - block; | ||
161 | numerr = persistent_ram_decode_rs8(prz, block, size, par); | ||
162 | if (numerr > 0) { | ||
163 | pr_devel("persistent_ram: error in block %p, %d\n", | ||
164 | block, numerr); | ||
165 | prz->corrected_bytes += numerr; | ||
166 | } else if (numerr < 0) { | ||
167 | pr_devel("persistent_ram: uncorrectable error in block %p\n", | ||
168 | block); | ||
169 | prz->bad_blocks++; | ||
170 | } | ||
171 | block += prz->ecc_block_size; | ||
172 | par += prz->ecc_size; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static int persistent_ram_init_ecc(struct persistent_ram_zone *prz, | ||
177 | size_t buffer_size) | ||
178 | { | ||
179 | int numerr; | ||
180 | struct persistent_ram_buffer *buffer = prz->buffer; | ||
181 | int ecc_blocks; | ||
182 | |||
183 | if (!prz->ecc) | ||
184 | return 0; | ||
185 | |||
186 | prz->ecc_block_size = 128; | ||
187 | prz->ecc_size = 16; | ||
188 | prz->ecc_symsize = 8; | ||
189 | prz->ecc_poly = 0x11d; | ||
190 | |||
191 | ecc_blocks = DIV_ROUND_UP(prz->buffer_size, prz->ecc_block_size); | ||
192 | prz->buffer_size -= (ecc_blocks + 1) * prz->ecc_size; | ||
193 | |||
194 | if (prz->buffer_size > buffer_size) { | ||
195 | pr_err("persistent_ram: invalid size %zu, non-ecc datasize %zu\n", | ||
196 | buffer_size, prz->buffer_size); | ||
197 | return -EINVAL; | ||
198 | } | ||
199 | |||
200 | prz->par_buffer = buffer->data + prz->buffer_size; | ||
201 | prz->par_header = prz->par_buffer + ecc_blocks * prz->ecc_size; | ||
202 | |||
203 | /* | ||
204 | * first consecutive root is 0 | ||
205 | * primitive element to generate roots = 1 | ||
206 | */ | ||
207 | prz->rs_decoder = init_rs(prz->ecc_symsize, prz->ecc_poly, 0, 1, | ||
208 | prz->ecc_size); | ||
209 | if (prz->rs_decoder == NULL) { | ||
210 | pr_info("persistent_ram: init_rs failed\n"); | ||
211 | return -EINVAL; | ||
212 | } | ||
213 | |||
214 | prz->corrected_bytes = 0; | ||
215 | prz->bad_blocks = 0; | ||
216 | |||
217 | numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer), | ||
218 | prz->par_header); | ||
219 | if (numerr > 0) { | ||
220 | pr_info("persistent_ram: error in header, %d\n", numerr); | ||
221 | prz->corrected_bytes += numerr; | ||
222 | } else if (numerr < 0) { | ||
223 | pr_info("persistent_ram: uncorrectable error in header\n"); | ||
224 | prz->bad_blocks++; | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, | ||
231 | char *str, size_t len) | ||
232 | { | ||
233 | ssize_t ret; | ||
234 | |||
235 | if (prz->corrected_bytes || prz->bad_blocks) | ||
236 | ret = snprintf(str, len, "" | ||
237 | "\n%d Corrected bytes, %d unrecoverable blocks\n", | ||
238 | prz->corrected_bytes, prz->bad_blocks); | ||
239 | else | ||
240 | ret = snprintf(str, len, "\nNo errors detected\n"); | ||
241 | |||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | static void notrace persistent_ram_update(struct persistent_ram_zone *prz, | ||
246 | const void *s, unsigned int start, unsigned int count) | ||
247 | { | ||
248 | struct persistent_ram_buffer *buffer = prz->buffer; | ||
249 | memcpy(buffer->data + start, s, count); | ||
250 | persistent_ram_update_ecc(prz, start, count); | ||
251 | } | ||
252 | |||
253 | static void __init | ||
254 | persistent_ram_save_old(struct persistent_ram_zone *prz) | ||
255 | { | ||
256 | struct persistent_ram_buffer *buffer = prz->buffer; | ||
257 | size_t size = buffer_size(prz); | ||
258 | size_t start = buffer_start(prz); | ||
259 | char *dest; | ||
260 | |||
261 | persistent_ram_ecc_old(prz); | ||
262 | |||
263 | dest = kmalloc(size, GFP_KERNEL); | ||
264 | if (dest == NULL) { | ||
265 | pr_err("persistent_ram: failed to allocate buffer\n"); | ||
266 | return; | ||
267 | } | ||
268 | |||
269 | prz->old_log = dest; | ||
270 | prz->old_log_size = size; | ||
271 | memcpy(prz->old_log, &buffer->data[start], size - start); | ||
272 | memcpy(prz->old_log + size - start, &buffer->data[0], start); | ||
273 | } | ||
274 | |||
275 | int notrace persistent_ram_write(struct persistent_ram_zone *prz, | ||
276 | const void *s, unsigned int count) | ||
277 | { | ||
278 | int rem; | ||
279 | int c = count; | ||
280 | size_t start; | ||
281 | |||
282 | if (unlikely(c > prz->buffer_size)) { | ||
283 | s += c - prz->buffer_size; | ||
284 | c = prz->buffer_size; | ||
285 | } | ||
286 | |||
287 | buffer_size_add(prz, c); | ||
288 | |||
289 | start = buffer_start_add(prz, c); | ||
290 | |||
291 | rem = prz->buffer_size - start; | ||
292 | if (unlikely(rem < c)) { | ||
293 | persistent_ram_update(prz, s, start, rem); | ||
294 | s += rem; | ||
295 | c -= rem; | ||
296 | start = 0; | ||
297 | } | ||
298 | persistent_ram_update(prz, s, start, c); | ||
299 | |||
300 | persistent_ram_update_header_ecc(prz); | ||
301 | |||
302 | return count; | ||
303 | } | ||
304 | |||
305 | size_t persistent_ram_old_size(struct persistent_ram_zone *prz) | ||
306 | { | ||
307 | return prz->old_log_size; | ||
308 | } | ||
309 | |||
310 | void *persistent_ram_old(struct persistent_ram_zone *prz) | ||
311 | { | ||
312 | return prz->old_log; | ||
313 | } | ||
314 | |||
315 | void persistent_ram_free_old(struct persistent_ram_zone *prz) | ||
316 | { | ||
317 | kfree(prz->old_log); | ||
318 | prz->old_log = NULL; | ||
319 | prz->old_log_size = 0; | ||
320 | } | ||
321 | |||
322 | static void *persistent_ram_vmap(phys_addr_t start, size_t size) | ||
323 | { | ||
324 | struct page **pages; | ||
325 | phys_addr_t page_start; | ||
326 | unsigned int page_count; | ||
327 | pgprot_t prot; | ||
328 | unsigned int i; | ||
329 | void *vaddr; | ||
330 | |||
331 | page_start = start - offset_in_page(start); | ||
332 | page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); | ||
333 | |||
334 | prot = pgprot_noncached(PAGE_KERNEL); | ||
335 | |||
336 | pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL); | ||
337 | if (!pages) { | ||
338 | pr_err("%s: Failed to allocate array for %u pages\n", __func__, | ||
339 | page_count); | ||
340 | return NULL; | ||
341 | } | ||
342 | |||
343 | for (i = 0; i < page_count; i++) { | ||
344 | phys_addr_t addr = page_start + i * PAGE_SIZE; | ||
345 | pages[i] = pfn_to_page(addr >> PAGE_SHIFT); | ||
346 | } | ||
347 | vaddr = vmap(pages, page_count, VM_MAP, prot); | ||
348 | kfree(pages); | ||
349 | |||
350 | return vaddr; | ||
351 | } | ||
352 | |||
353 | static void *persistent_ram_iomap(phys_addr_t start, size_t size) | ||
354 | { | ||
355 | if (!request_mem_region(start, size, "persistent_ram")) { | ||
356 | pr_err("request mem region (0x%llx@0x%llx) failed\n", | ||
357 | (unsigned long long)size, (unsigned long long)start); | ||
358 | return NULL; | ||
359 | } | ||
360 | |||
361 | return ioremap(start, size); | ||
362 | } | ||
363 | |||
364 | static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, | ||
365 | struct persistent_ram_zone *prz) | ||
366 | { | ||
367 | prz->paddr = start; | ||
368 | prz->size = size; | ||
369 | |||
370 | if (pfn_valid(start >> PAGE_SHIFT)) | ||
371 | prz->vaddr = persistent_ram_vmap(start, size); | ||
372 | else | ||
373 | prz->vaddr = persistent_ram_iomap(start, size); | ||
374 | |||
375 | if (!prz->vaddr) { | ||
376 | pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__, | ||
377 | (unsigned long long)size, (unsigned long long)start); | ||
378 | return -ENOMEM; | ||
379 | } | ||
380 | |||
381 | prz->buffer = prz->vaddr + offset_in_page(start); | ||
382 | prz->buffer_size = size - sizeof(struct persistent_ram_buffer); | ||
383 | |||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool ecc) | ||
388 | { | ||
389 | int ret; | ||
390 | |||
391 | prz->ecc = ecc; | ||
392 | |||
393 | ret = persistent_ram_init_ecc(prz, prz->buffer_size); | ||
394 | if (ret) | ||
395 | return ret; | ||
396 | |||
397 | if (prz->buffer->sig == PERSISTENT_RAM_SIG) { | ||
398 | if (buffer_size(prz) > prz->buffer_size || | ||
399 | buffer_start(prz) > buffer_size(prz)) | ||
400 | pr_info("persistent_ram: found existing invalid buffer," | ||
401 | " size %zu, start %zu\n", | ||
402 | buffer_size(prz), buffer_start(prz)); | ||
403 | else { | ||
404 | pr_info("persistent_ram: found existing buffer," | ||
405 | " size %zu, start %zu\n", | ||
406 | buffer_size(prz), buffer_start(prz)); | ||
407 | persistent_ram_save_old(prz); | ||
408 | } | ||
409 | } else { | ||
410 | pr_info("persistent_ram: no valid data in buffer" | ||
411 | " (sig = 0x%08x)\n", prz->buffer->sig); | ||
412 | } | ||
413 | |||
414 | prz->buffer->sig = PERSISTENT_RAM_SIG; | ||
415 | atomic_set(&prz->buffer->start, 0); | ||
416 | atomic_set(&prz->buffer->size, 0); | ||
417 | |||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | void persistent_ram_free(struct persistent_ram_zone *prz) | ||
422 | { | ||
423 | if (pfn_valid(prz->paddr >> PAGE_SHIFT)) { | ||
424 | vunmap(prz->vaddr); | ||
425 | } else { | ||
426 | iounmap(prz->vaddr); | ||
427 | release_mem_region(prz->paddr, prz->size); | ||
428 | } | ||
429 | persistent_ram_free_old(prz); | ||
430 | kfree(prz); | ||
431 | } | ||
432 | |||
433 | struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start, | ||
434 | size_t size, | ||
435 | bool ecc) | ||
436 | { | ||
437 | struct persistent_ram_zone *prz; | ||
438 | int ret = -ENOMEM; | ||
439 | |||
440 | prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL); | ||
441 | if (!prz) { | ||
442 | pr_err("persistent_ram: failed to allocate persistent ram zone\n"); | ||
443 | goto err; | ||
444 | } | ||
445 | |||
446 | ret = persistent_ram_buffer_map(start, size, prz); | ||
447 | if (ret) | ||
448 | goto err; | ||
449 | |||
450 | persistent_ram_post_init(prz, ecc); | ||
451 | persistent_ram_update_header_ecc(prz); | ||
452 | |||
453 | return prz; | ||
454 | err: | ||
455 | kfree(prz); | ||
456 | return ERR_PTR(ret); | ||
457 | } | ||
458 | |||
459 | #ifndef MODULE | ||
460 | static int __init persistent_ram_buffer_init(const char *name, | ||
461 | struct persistent_ram_zone *prz) | ||
462 | { | ||
463 | int i; | ||
464 | struct persistent_ram *ram; | ||
465 | struct persistent_ram_descriptor *desc; | ||
466 | phys_addr_t start; | ||
467 | |||
468 | list_for_each_entry(ram, &persistent_ram_list, node) { | ||
469 | start = ram->start; | ||
470 | for (i = 0; i < ram->num_descs; i++) { | ||
471 | desc = &ram->descs[i]; | ||
472 | if (!strcmp(desc->name, name)) | ||
473 | return persistent_ram_buffer_map(start, | ||
474 | desc->size, prz); | ||
475 | start += desc->size; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | return -EINVAL; | ||
480 | } | ||
481 | |||
482 | static __init | ||
483 | struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc) | ||
484 | { | ||
485 | struct persistent_ram_zone *prz; | ||
486 | int ret = -ENOMEM; | ||
487 | |||
488 | prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL); | ||
489 | if (!prz) { | ||
490 | pr_err("persistent_ram: failed to allocate persistent ram zone\n"); | ||
491 | goto err; | ||
492 | } | ||
493 | |||
494 | ret = persistent_ram_buffer_init(dev_name(dev), prz); | ||
495 | if (ret) { | ||
496 | pr_err("persistent_ram: failed to initialize buffer\n"); | ||
497 | goto err; | ||
498 | } | ||
499 | |||
500 | persistent_ram_post_init(prz, ecc); | ||
501 | |||
502 | return prz; | ||
503 | err: | ||
504 | kfree(prz); | ||
505 | return ERR_PTR(ret); | ||
506 | } | ||
507 | |||
508 | struct persistent_ram_zone * __init | ||
509 | persistent_ram_init_ringbuffer(struct device *dev, bool ecc) | ||
510 | { | ||
511 | return __persistent_ram_init(dev, ecc); | ||
512 | } | ||
513 | |||
514 | int __init persistent_ram_early_init(struct persistent_ram *ram) | ||
515 | { | ||
516 | int ret; | ||
517 | |||
518 | ret = memblock_reserve(ram->start, ram->size); | ||
519 | if (ret) { | ||
520 | pr_err("Failed to reserve persistent memory from %08lx-%08lx\n", | ||
521 | (long)ram->start, (long)(ram->start + ram->size - 1)); | ||
522 | return ret; | ||
523 | } | ||
524 | |||
525 | list_add_tail(&ram->node, &persistent_ram_list); | ||
526 | |||
527 | pr_info("Initialized persistent memory from %08lx-%08lx\n", | ||
528 | (long)ram->start, (long)(ram->start + ram->size - 1)); | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | #endif | ||
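To make the space accounting in persistent_ram_buffer_map() and persistent_ram_init_ecc() concrete, here is a small worked sketch of how much payload one record keeps once the buffer header and the Reed-Solomon parity are carved out. The 12-byte header assumes the usual 4-byte atomic_t; the 128/16 constants are the ones hard-coded above.

#include <linux/kernel.h>

static size_t demo_usable_payload(size_t record_size)
{
	/* sig + start + size header, typically 12 bytes */
	size_t payload = record_size - 12;
	/* one 16-byte parity chunk per 128-byte data block ... */
	size_t ecc_blocks = DIV_ROUND_UP(payload, 128);

	/* ... plus one extra parity chunk covering the header itself */
	return payload - (ecc_blocks + 1) * 16;
}

/* e.g. record_size = 4096: payload = 4084, ecc_blocks = 32,
 * usable = 4084 - 33 * 16 = 3556 bytes of log data per record. */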
@@ -57,12 +57,13 @@ EXPORT_SYMBOL(vfs_getattr); | |||
57 | 57 | ||
58 | int vfs_fstat(unsigned int fd, struct kstat *stat) | 58 | int vfs_fstat(unsigned int fd, struct kstat *stat) |
59 | { | 59 | { |
60 | struct file *f = fget(fd); | 60 | int fput_needed; |
61 | struct file *f = fget_light(fd, &fput_needed); | ||
61 | int error = -EBADF; | 62 | int error = -EBADF; |
62 | 63 | ||
63 | if (f) { | 64 | if (f) { |
64 | error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat); | 65 | error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat); |
65 | fput(f); | 66 | fput_light(f, fput_needed); |
66 | } | 67 | } |
67 | return error; | 68 | return error; |
68 | } | 69 | } |
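The vfs_fstat() hunk above is a straight swap to the lightweight file-lookup pair: when the file table is not shared, fget_light() can return the struct file without bumping the refcount and reports through fput_needed whether fput_light() has anything to drop. The calling pattern in isolation (demo name):

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>

static int demo_with_file(unsigned int fd)
{
	int fput_needed;
	struct file *f = fget_light(fd, &fput_needed);

	if (!f)
		return -EBADF;
	/* ... use f; on the fast path no extra reference was taken ... */
	fput_light(f, fput_needed);	/* drops the ref only if one was taken */
	return 0;
}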
@@ -190,24 +191,32 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat | |||
190 | 191 | ||
191 | #endif /* __ARCH_WANT_OLD_STAT */ | 192 | #endif /* __ARCH_WANT_OLD_STAT */ |
192 | 193 | ||
194 | #if BITS_PER_LONG == 32 | ||
195 | # define choose_32_64(a,b) a | ||
196 | #else | ||
197 | # define choose_32_64(a,b) b | ||
198 | #endif | ||
199 | |||
200 | #define valid_dev(x) choose_32_64(old_valid_dev,new_valid_dev)(x) | ||
201 | #define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x) | ||
202 | |||
203 | #ifndef INIT_STRUCT_STAT_PADDING | ||
204 | # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st)) | ||
205 | #endif | ||
206 | |||
193 | static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) | 207 | static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) |
194 | { | 208 | { |
195 | struct stat tmp; | 209 | struct stat tmp; |
196 | 210 | ||
197 | #if BITS_PER_LONG == 32 | 211 | if (!valid_dev(stat->dev) || !valid_dev(stat->rdev)) |
198 | if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev)) | ||
199 | return -EOVERFLOW; | 212 | return -EOVERFLOW; |
200 | #else | 213 | #if BITS_PER_LONG == 32 |
201 | if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev)) | 214 | if (stat->size > MAX_NON_LFS) |
202 | return -EOVERFLOW; | 215 | return -EOVERFLOW; |
203 | #endif | 216 | #endif |
204 | 217 | ||
205 | memset(&tmp, 0, sizeof(tmp)); | 218 | INIT_STRUCT_STAT_PADDING(tmp); |
206 | #if BITS_PER_LONG == 32 | 219 | tmp.st_dev = encode_dev(stat->dev); |
207 | tmp.st_dev = old_encode_dev(stat->dev); | ||
208 | #else | ||
209 | tmp.st_dev = new_encode_dev(stat->dev); | ||
210 | #endif | ||
211 | tmp.st_ino = stat->ino; | 220 | tmp.st_ino = stat->ino; |
212 | if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) | 221 | if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) |
213 | return -EOVERFLOW; | 222 | return -EOVERFLOW; |
@@ -217,15 +226,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) | |||
217 | return -EOVERFLOW; | 226 | return -EOVERFLOW; |
218 | SET_UID(tmp.st_uid, stat->uid); | 227 | SET_UID(tmp.st_uid, stat->uid); |
219 | SET_GID(tmp.st_gid, stat->gid); | 228 | SET_GID(tmp.st_gid, stat->gid); |
220 | #if BITS_PER_LONG == 32 | 229 | tmp.st_rdev = encode_dev(stat->rdev); |
221 | tmp.st_rdev = old_encode_dev(stat->rdev); | ||
222 | #else | ||
223 | tmp.st_rdev = new_encode_dev(stat->rdev); | ||
224 | #endif | ||
225 | #if BITS_PER_LONG == 32 | ||
226 | if (stat->size > MAX_NON_LFS) | ||
227 | return -EOVERFLOW; | ||
228 | #endif | ||
229 | tmp.st_size = stat->size; | 230 | tmp.st_size = stat->size; |
230 | tmp.st_atime = stat->atime.tv_sec; | 231 | tmp.st_atime = stat->atime.tv_sec; |
231 | tmp.st_mtime = stat->mtime.tv_sec; | 232 | tmp.st_mtime = stat->mtime.tv_sec; |
@@ -327,11 +328,15 @@ SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf, | |||
327 | /* ---------- LFS-64 ----------- */ | 328 | /* ---------- LFS-64 ----------- */ |
328 | #ifdef __ARCH_WANT_STAT64 | 329 | #ifdef __ARCH_WANT_STAT64 |
329 | 330 | ||
331 | #ifndef INIT_STRUCT_STAT64_PADDING | ||
332 | # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st)) | ||
333 | #endif | ||
334 | |||
330 | static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf) | 335 | static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf) |
331 | { | 336 | { |
332 | struct stat64 tmp; | 337 | struct stat64 tmp; |
333 | 338 | ||
334 | memset(&tmp, 0, sizeof(struct stat64)); | 339 | INIT_STRUCT_STAT64_PADDING(tmp); |
335 | #ifdef CONFIG_MIPS | 340 | #ifdef CONFIG_MIPS |
336 | /* mips has weird padding, so we don't get 64 bits there */ | 341 | /* mips has weird padding, so we don't get 64 bits there */ |
337 | if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev)) | 342 | if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev)) |
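The new INIT_STRUCT_STAT_PADDING / INIT_STRUCT_STAT64_PADDING hooks above keep the memset-everything default but let an architecture that knows its struct stat layout clear only the genuine padding fields. A hypothetical override, assuming __unused4/__unused5 style pad members (the real field names depend on the arch's uapi struct stat):

/* in an arch header included before fs/stat.c is compiled */
#define INIT_STRUCT_STAT_PADDING(st) do {	\
	(st).__unused4 = 0;			\
	(st).__unused5 = 0;			\
} while (0)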
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 35a36d39fa2c..e6bb9b2a4cbe 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c | |||
@@ -132,6 +132,24 @@ static void sysfs_unlink_sibling(struct sysfs_dirent *sd) | |||
132 | rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children); | 132 | rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children); |
133 | } | 133 | } |
134 | 134 | ||
135 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
136 | |||
137 | /* Test for attributes that want to ignore lockdep for read-locking */ | ||
138 | static bool ignore_lockdep(struct sysfs_dirent *sd) | ||
139 | { | ||
140 | return sysfs_type(sd) == SYSFS_KOBJ_ATTR && | ||
141 | sd->s_attr.attr->ignore_lockdep; | ||
142 | } | ||
143 | |||
144 | #else | ||
145 | |||
146 | static inline bool ignore_lockdep(struct sysfs_dirent *sd) | ||
147 | { | ||
148 | return true; | ||
149 | } | ||
150 | |||
151 | #endif | ||
152 | |||
135 | /** | 153 | /** |
136 | * sysfs_get_active - get an active reference to sysfs_dirent | 154 | * sysfs_get_active - get an active reference to sysfs_dirent |
137 | * @sd: sysfs_dirent to get an active reference to | 155 | * @sd: sysfs_dirent to get an active reference to |
@@ -155,15 +173,17 @@ struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd) | |||
155 | return NULL; | 173 | return NULL; |
156 | 174 | ||
157 | t = atomic_cmpxchg(&sd->s_active, v, v + 1); | 175 | t = atomic_cmpxchg(&sd->s_active, v, v + 1); |
158 | if (likely(t == v)) { | 176 | if (likely(t == v)) |
159 | rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_); | 177 | break; |
160 | return sd; | ||
161 | } | ||
162 | if (t < 0) | 178 | if (t < 0) |
163 | return NULL; | 179 | return NULL; |
164 | 180 | ||
165 | cpu_relax(); | 181 | cpu_relax(); |
166 | } | 182 | } |
183 | |||
184 | if (likely(!ignore_lockdep(sd))) | ||
185 | rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_); | ||
186 | return sd; | ||
167 | } | 187 | } |
168 | 188 | ||
169 | /** | 189 | /** |
@@ -180,7 +200,8 @@ void sysfs_put_active(struct sysfs_dirent *sd) | |||
180 | if (unlikely(!sd)) | 200 | if (unlikely(!sd)) |
181 | return; | 201 | return; |
182 | 202 | ||
183 | rwsem_release(&sd->dep_map, 1, _RET_IP_); | 203 | if (likely(!ignore_lockdep(sd))) |
204 | rwsem_release(&sd->dep_map, 1, _RET_IP_); | ||
184 | v = atomic_dec_return(&sd->s_active); | 205 | v = atomic_dec_return(&sd->s_active); |
185 | if (likely(v != SD_DEACTIVATED_BIAS)) | 206 | if (likely(v != SD_DEACTIVATED_BIAS)) |
186 | return; | 207 | return; |
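With the ignore_lockdep() test above, an individual sysfs attribute can opt out of the lockdep read-lock annotation on its s_active reference; the reference counting itself is unchanged. A rough sketch of how a driver-side attribute might set the flag it reads (the attribute and its callbacks are made up; the ignore_lockdep bit in struct attribute is the one this patch tests, and a convenience macro such as __ATTR_IGNORE_LOCKDEP(), if available, would express the same thing):

	#include <linux/sysfs.h>
	#include <linux/stat.h>

	static ssize_t online_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf);
	static ssize_t online_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count);

	/* Hypothetical attribute whose store() method may remove the
	 * attribute itself; marking it ignore_lockdep suppresses the
	 * false-positive "deadlock" report from lockdep. */
	static struct kobj_attribute online_attr = {
		.attr = {
			.name = "online",
			.mode = S_IRUGO | S_IWUSR,
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
			.ignore_lockdep = true,
	#endif
		},
		.show  = online_show,
		.store = online_store,
	};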
@@ -858,7 +879,6 @@ int sysfs_rename(struct sysfs_dirent *sd, | |||
858 | struct sysfs_dirent *new_parent_sd, const void *new_ns, | 879 | struct sysfs_dirent *new_parent_sd, const void *new_ns, |
859 | const char *new_name) | 880 | const char *new_name) |
860 | { | 881 | { |
861 | const char *dup_name = NULL; | ||
862 | int error; | 882 | int error; |
863 | 883 | ||
864 | mutex_lock(&sysfs_mutex); | 884 | mutex_lock(&sysfs_mutex); |
@@ -875,11 +895,11 @@ int sysfs_rename(struct sysfs_dirent *sd, | |||
875 | /* rename sysfs_dirent */ | 895 | /* rename sysfs_dirent */ |
876 | if (strcmp(sd->s_name, new_name) != 0) { | 896 | if (strcmp(sd->s_name, new_name) != 0) { |
877 | error = -ENOMEM; | 897 | error = -ENOMEM; |
878 | new_name = dup_name = kstrdup(new_name, GFP_KERNEL); | 898 | new_name = kstrdup(new_name, GFP_KERNEL); |
879 | if (!new_name) | 899 | if (!new_name) |
880 | goto out; | 900 | goto out; |
881 | 901 | ||
882 | dup_name = sd->s_name; | 902 | kfree(sd->s_name); |
883 | sd->s_name = new_name; | 903 | sd->s_name = new_name; |
884 | } | 904 | } |
885 | 905 | ||
@@ -895,7 +915,6 @@ int sysfs_rename(struct sysfs_dirent *sd, | |||
895 | error = 0; | 915 | error = 0; |
896 | out: | 916 | out: |
897 | mutex_unlock(&sysfs_mutex); | 917 | mutex_unlock(&sysfs_mutex); |
898 | kfree(dup_name); | ||
899 | return error; | 918 | return error; |
900 | } | 919 | } |
901 | 920 | ||
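The sysfs_rename() change above drops the dup_name bookkeeping: the old name is now kfree()d immediately while sysfs_mutex is still held, instead of being stashed and freed after the unlock. Reduced to its essentials, the pattern looks roughly like the sketch below (generic types, not sysfs ones; it assumes every reader of ->name holds the same lock as the caller of this function):

	#include <linux/slab.h>
	#include <linux/string.h>

	struct obj {
		char *name;
	};

	/* Swap in a duplicated new name and free the old one in place. */
	static int rename_obj(struct obj *o, const char *new_name)
	{
		char *dup;

		if (strcmp(o->name, new_name) == 0)
			return 0;
		dup = kstrdup(new_name, GFP_KERNEL);
		if (!dup)
			return -ENOMEM;
		kfree(o->name);		/* safe: readers hold the same lock */
		o->name = dup;
		return 0;
	}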
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig index f8b0160da2da..ba66d508006a 100644 --- a/fs/ubifs/Kconfig +++ b/fs/ubifs/Kconfig | |||
@@ -11,12 +11,6 @@ config UBIFS_FS | |||
11 | help | 11 | help |
12 | UBIFS is a file system for flash devices which works on top of UBI. | 12 | UBIFS is a file system for flash devices which works on top of UBI. |
13 | 13 | ||
14 | config UBIFS_FS_XATTR | ||
15 | bool "Extended attributes support" | ||
16 | depends on UBIFS_FS | ||
17 | help | ||
18 | This option enables support of extended attributes. | ||
19 | |||
20 | config UBIFS_FS_ADVANCED_COMPR | 14 | config UBIFS_FS_ADVANCED_COMPR |
21 | bool "Advanced compression options" | 15 | bool "Advanced compression options" |
22 | depends on UBIFS_FS | 16 | depends on UBIFS_FS |
@@ -41,20 +35,3 @@ config UBIFS_FS_ZLIB | |||
41 | default y | 35 | default y |
42 | help | 36 | help |
43 | Zlib compresses better than LZO but it is slower. Say 'Y' if unsure. | 37 | Zlib compresses better than LZO but it is slower. Say 'Y' if unsure. |
44 | |||
45 | # Debugging-related stuff | ||
46 | config UBIFS_FS_DEBUG | ||
47 | bool "Enable debugging support" | ||
48 | depends on UBIFS_FS | ||
49 | select DEBUG_FS | ||
50 | select KALLSYMS | ||
51 | help | ||
52 | This option enables UBIFS debugging support. It makes sure various | ||
53 | assertions, self-checks, debugging messages and test modes are compiled | ||
54 | in (this all is compiled out otherwise). Assertions are light-weight | ||
55 | and this option also enables them. Self-checks, debugging messages and | ||
56 | test modes are switched off by default. Thus, it is safe and actually | ||
57 | recommended to have debugging support enabled, and it should not slow | ||
58 | down UBIFS. You can then further enable / disable individual debugging | ||
59 | features using UBIFS module parameters and the corresponding sysfs | ||
60 | interfaces. | ||
diff --git a/fs/ubifs/Makefile b/fs/ubifs/Makefile index 80e93c35e496..2c6f0cb816b4 100644 --- a/fs/ubifs/Makefile +++ b/fs/ubifs/Makefile | |||
@@ -3,7 +3,4 @@ obj-$(CONFIG_UBIFS_FS) += ubifs.o | |||
3 | ubifs-y += shrinker.o journal.o file.o dir.o super.o sb.o io.o | 3 | ubifs-y += shrinker.o journal.o file.o dir.o super.o sb.o io.o |
4 | ubifs-y += tnc.o master.o scan.o replay.o log.o commit.o gc.o orphan.o | 4 | ubifs-y += tnc.o master.o scan.o replay.o log.o commit.o gc.o orphan.o |
5 | ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o | 5 | ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o |
6 | ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o | 6 | ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o xattr.o debug.o |
7 | |||
8 | ubifs-$(CONFIG_UBIFS_FS_DEBUG) += debug.o | ||
9 | ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o | ||
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index fb3b5c813a30..8eda717cb99b 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c | |||
@@ -496,7 +496,9 @@ int ubifs_gc_should_commit(struct ubifs_info *c) | |||
496 | return ret; | 496 | return ret; |
497 | } | 497 | } |
498 | 498 | ||
499 | #ifdef CONFIG_UBIFS_FS_DEBUG | 499 | /* |
500 | * Everything below is related to debugging. | ||
501 | */ | ||
500 | 502 | ||
501 | /** | 503 | /** |
502 | * struct idx_node - hold index nodes during index tree traversal. | 504 | * struct idx_node - hold index nodes during index tree traversal. |
@@ -714,14 +716,14 @@ out: | |||
714 | return 0; | 716 | return 0; |
715 | 717 | ||
716 | out_dump: | 718 | out_dump: |
717 | dbg_err("dumping index node (iip=%d)", i->iip); | 719 | ubifs_err("dumping index node (iip=%d)", i->iip); |
718 | dbg_dump_node(c, idx); | 720 | ubifs_dump_node(c, idx); |
719 | list_del(&i->list); | 721 | list_del(&i->list); |
720 | kfree(i); | 722 | kfree(i); |
721 | if (!list_empty(&list)) { | 723 | if (!list_empty(&list)) { |
722 | i = list_entry(list.prev, struct idx_node, list); | 724 | i = list_entry(list.prev, struct idx_node, list); |
723 | dbg_err("dumping parent index node"); | 725 | ubifs_err("dumping parent index node"); |
724 | dbg_dump_node(c, &i->idx); | 726 | ubifs_dump_node(c, &i->idx); |
725 | } | 727 | } |
726 | out_free: | 728 | out_free: |
727 | while (!list_empty(&list)) { | 729 | while (!list_empty(&list)) { |
@@ -734,5 +736,3 @@ out_free: | |||
734 | err = -EINVAL; | 736 | err = -EINVAL; |
735 | return err; | 737 | return err; |
736 | } | 738 | } |
737 | |||
738 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
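With CONFIG_UBIFS_FS_DEBUG gone, the index-checking code above is always built; whether it actually runs is decided at runtime by the debugfs-controlled self-check switches. A hedged sketch of the compile-time-to-runtime pattern this series moves to — dbg_is_chk_index() and dbg_check_idx_size() are existing UBIFS helpers, while the wrapper function and the old_idx_sz field access are illustrative only:

	/* Before: the whole checker was compiled out without UBIFS_FS_DEBUG.
	 * After: it is always compiled, but returns early unless the
	 * corresponding self-check has been enabled at runtime. */
	static int maybe_check_index(struct ubifs_info *c)
	{
		if (!dbg_is_chk_index(c))	/* runtime switch, off by default */
			return 0;
		return dbg_check_idx_size(c, c->bi.old_idx_sz);
	}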
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 1934084e2088..685a83756b2b 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
@@ -34,8 +34,6 @@ | |||
34 | #include <linux/random.h> | 34 | #include <linux/random.h> |
35 | #include "ubifs.h" | 35 | #include "ubifs.h" |
36 | 36 | ||
37 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
38 | |||
39 | static DEFINE_SPINLOCK(dbg_lock); | 37 | static DEFINE_SPINLOCK(dbg_lock); |
40 | 38 | ||
41 | static const char *get_key_fmt(int fmt) | 39 | static const char *get_key_fmt(int fmt) |
@@ -232,7 +230,7 @@ static void dump_ch(const struct ubifs_ch *ch) | |||
232 | printk(KERN_ERR "\tlen %u\n", le32_to_cpu(ch->len)); | 230 | printk(KERN_ERR "\tlen %u\n", le32_to_cpu(ch->len)); |
233 | } | 231 | } |
234 | 232 | ||
235 | void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode) | 233 | void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode) |
236 | { | 234 | { |
237 | const struct ubifs_inode *ui = ubifs_inode(inode); | 235 | const struct ubifs_inode *ui = ubifs_inode(inode); |
238 | struct qstr nm = { .name = NULL }; | 236 | struct qstr nm = { .name = NULL }; |
@@ -300,7 +298,7 @@ void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode) | |||
300 | kfree(pdent); | 298 | kfree(pdent); |
301 | } | 299 | } |
302 | 300 | ||
303 | void dbg_dump_node(const struct ubifs_info *c, const void *node) | 301 | void ubifs_dump_node(const struct ubifs_info *c, const void *node) |
304 | { | 302 | { |
305 | int i, n; | 303 | int i, n; |
306 | union ubifs_key key; | 304 | union ubifs_key key; |
@@ -603,7 +601,7 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) | |||
603 | spin_unlock(&dbg_lock); | 601 | spin_unlock(&dbg_lock); |
604 | } | 602 | } |
605 | 603 | ||
606 | void dbg_dump_budget_req(const struct ubifs_budget_req *req) | 604 | void ubifs_dump_budget_req(const struct ubifs_budget_req *req) |
607 | { | 605 | { |
608 | spin_lock(&dbg_lock); | 606 | spin_lock(&dbg_lock); |
609 | printk(KERN_ERR "Budgeting request: new_ino %d, dirtied_ino %d\n", | 607 | printk(KERN_ERR "Budgeting request: new_ino %d, dirtied_ino %d\n", |
@@ -620,7 +618,7 @@ void dbg_dump_budget_req(const struct ubifs_budget_req *req) | |||
620 | spin_unlock(&dbg_lock); | 618 | spin_unlock(&dbg_lock); |
621 | } | 619 | } |
622 | 620 | ||
623 | void dbg_dump_lstats(const struct ubifs_lp_stats *lst) | 621 | void ubifs_dump_lstats(const struct ubifs_lp_stats *lst) |
624 | { | 622 | { |
625 | spin_lock(&dbg_lock); | 623 | spin_lock(&dbg_lock); |
626 | printk(KERN_ERR "(pid %d) Lprops statistics: empty_lebs %d, " | 624 | printk(KERN_ERR "(pid %d) Lprops statistics: empty_lebs %d, " |
@@ -634,7 +632,7 @@ void dbg_dump_lstats(const struct ubifs_lp_stats *lst) | |||
634 | spin_unlock(&dbg_lock); | 632 | spin_unlock(&dbg_lock); |
635 | } | 633 | } |
636 | 634 | ||
637 | void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi) | 635 | void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi) |
638 | { | 636 | { |
639 | int i; | 637 | int i; |
640 | struct rb_node *rb; | 638 | struct rb_node *rb; |
@@ -707,7 +705,7 @@ out_unlock: | |||
707 | spin_unlock(&c->space_lock); | 705 | spin_unlock(&c->space_lock); |
708 | } | 706 | } |
709 | 707 | ||
710 | void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp) | 708 | void ubifs_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp) |
711 | { | 709 | { |
712 | int i, spc, dark = 0, dead = 0; | 710 | int i, spc, dark = 0, dead = 0; |
713 | struct rb_node *rb; | 711 | struct rb_node *rb; |
@@ -801,7 +799,7 @@ void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp) | |||
801 | printk(KERN_CONT ")\n"); | 799 | printk(KERN_CONT ")\n"); |
802 | } | 800 | } |
803 | 801 | ||
804 | void dbg_dump_lprops(struct ubifs_info *c) | 802 | void ubifs_dump_lprops(struct ubifs_info *c) |
805 | { | 803 | { |
806 | int lnum, err; | 804 | int lnum, err; |
807 | struct ubifs_lprops lp; | 805 | struct ubifs_lprops lp; |
@@ -810,20 +808,20 @@ void dbg_dump_lprops(struct ubifs_info *c) | |||
810 | printk(KERN_ERR "(pid %d) start dumping LEB properties\n", | 808 | printk(KERN_ERR "(pid %d) start dumping LEB properties\n", |
811 | current->pid); | 809 | current->pid); |
812 | ubifs_get_lp_stats(c, &lst); | 810 | ubifs_get_lp_stats(c, &lst); |
813 | dbg_dump_lstats(&lst); | 811 | ubifs_dump_lstats(&lst); |
814 | 812 | ||
815 | for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) { | 813 | for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) { |
816 | err = ubifs_read_one_lp(c, lnum, &lp); | 814 | err = ubifs_read_one_lp(c, lnum, &lp); |
817 | if (err) | 815 | if (err) |
818 | ubifs_err("cannot read lprops for LEB %d", lnum); | 816 | ubifs_err("cannot read lprops for LEB %d", lnum); |
819 | 817 | ||
820 | dbg_dump_lprop(c, &lp); | 818 | ubifs_dump_lprop(c, &lp); |
821 | } | 819 | } |
822 | printk(KERN_ERR "(pid %d) finish dumping LEB properties\n", | 820 | printk(KERN_ERR "(pid %d) finish dumping LEB properties\n", |
823 | current->pid); | 821 | current->pid); |
824 | } | 822 | } |
825 | 823 | ||
826 | void dbg_dump_lpt_info(struct ubifs_info *c) | 824 | void ubifs_dump_lpt_info(struct ubifs_info *c) |
827 | { | 825 | { |
828 | int i; | 826 | int i; |
829 | 827 | ||
@@ -862,8 +860,8 @@ void dbg_dump_lpt_info(struct ubifs_info *c) | |||
862 | spin_unlock(&dbg_lock); | 860 | spin_unlock(&dbg_lock); |
863 | } | 861 | } |
864 | 862 | ||
865 | void dbg_dump_sleb(const struct ubifs_info *c, | 863 | void ubifs_dump_sleb(const struct ubifs_info *c, |
866 | const struct ubifs_scan_leb *sleb, int offs) | 864 | const struct ubifs_scan_leb *sleb, int offs) |
867 | { | 865 | { |
868 | struct ubifs_scan_node *snod; | 866 | struct ubifs_scan_node *snod; |
869 | 867 | ||
@@ -874,11 +872,11 @@ void dbg_dump_sleb(const struct ubifs_info *c, | |||
874 | cond_resched(); | 872 | cond_resched(); |
875 | printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", sleb->lnum, | 873 | printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", sleb->lnum, |
876 | snod->offs, snod->len); | 874 | snod->offs, snod->len); |
877 | dbg_dump_node(c, snod->node); | 875 | ubifs_dump_node(c, snod->node); |
878 | } | 876 | } |
879 | } | 877 | } |
880 | 878 | ||
881 | void dbg_dump_leb(const struct ubifs_info *c, int lnum) | 879 | void ubifs_dump_leb(const struct ubifs_info *c, int lnum) |
882 | { | 880 | { |
883 | struct ubifs_scan_leb *sleb; | 881 | struct ubifs_scan_leb *sleb; |
884 | struct ubifs_scan_node *snod; | 882 | struct ubifs_scan_node *snod; |
@@ -909,7 +907,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) | |||
909 | cond_resched(); | 907 | cond_resched(); |
910 | printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", lnum, | 908 | printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", lnum, |
911 | snod->offs, snod->len); | 909 | snod->offs, snod->len); |
912 | dbg_dump_node(c, snod->node); | 910 | ubifs_dump_node(c, snod->node); |
913 | } | 911 | } |
914 | 912 | ||
915 | printk(KERN_ERR "(pid %d) finish dumping LEB %d\n", | 913 | printk(KERN_ERR "(pid %d) finish dumping LEB %d\n", |
@@ -921,8 +919,8 @@ out: | |||
921 | return; | 919 | return; |
922 | } | 920 | } |
923 | 921 | ||
924 | void dbg_dump_znode(const struct ubifs_info *c, | 922 | void ubifs_dump_znode(const struct ubifs_info *c, |
925 | const struct ubifs_znode *znode) | 923 | const struct ubifs_znode *znode) |
926 | { | 924 | { |
927 | int n; | 925 | int n; |
928 | const struct ubifs_zbranch *zbr; | 926 | const struct ubifs_zbranch *zbr; |
@@ -965,7 +963,7 @@ void dbg_dump_znode(const struct ubifs_info *c, | |||
965 | spin_unlock(&dbg_lock); | 963 | spin_unlock(&dbg_lock); |
966 | } | 964 | } |
967 | 965 | ||
968 | void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) | 966 | void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) |
969 | { | 967 | { |
970 | int i; | 968 | int i; |
971 | 969 | ||
@@ -981,8 +979,8 @@ void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) | |||
981 | printk(KERN_ERR "(pid %d) finish dumping heap\n", current->pid); | 979 | printk(KERN_ERR "(pid %d) finish dumping heap\n", current->pid); |
982 | } | 980 | } |
983 | 981 | ||
984 | void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | 982 | void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, |
985 | struct ubifs_nnode *parent, int iip) | 983 | struct ubifs_nnode *parent, int iip) |
986 | { | 984 | { |
987 | int i; | 985 | int i; |
988 | 986 | ||
@@ -999,7 +997,7 @@ void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
999 | } | 997 | } |
1000 | } | 998 | } |
1001 | 999 | ||
1002 | void dbg_dump_tnc(struct ubifs_info *c) | 1000 | void ubifs_dump_tnc(struct ubifs_info *c) |
1003 | { | 1001 | { |
1004 | struct ubifs_znode *znode; | 1002 | struct ubifs_znode *znode; |
1005 | int level; | 1003 | int level; |
@@ -1014,7 +1012,7 @@ void dbg_dump_tnc(struct ubifs_info *c) | |||
1014 | level = znode->level; | 1012 | level = znode->level; |
1015 | printk(KERN_ERR "== Level %d ==\n", level); | 1013 | printk(KERN_ERR "== Level %d ==\n", level); |
1016 | } | 1014 | } |
1017 | dbg_dump_znode(c, znode); | 1015 | ubifs_dump_znode(c, znode); |
1018 | znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); | 1016 | znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); |
1019 | } | 1017 | } |
1020 | printk(KERN_ERR "(pid %d) finish dumping TNC tree\n", current->pid); | 1018 | printk(KERN_ERR "(pid %d) finish dumping TNC tree\n", current->pid); |
@@ -1023,18 +1021,18 @@ void dbg_dump_tnc(struct ubifs_info *c) | |||
1023 | static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode, | 1021 | static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode, |
1024 | void *priv) | 1022 | void *priv) |
1025 | { | 1023 | { |
1026 | dbg_dump_znode(c, znode); | 1024 | ubifs_dump_znode(c, znode); |
1027 | return 0; | 1025 | return 0; |
1028 | } | 1026 | } |
1029 | 1027 | ||
1030 | /** | 1028 | /** |
1031 | * dbg_dump_index - dump the on-flash index. | 1029 | * ubifs_dump_index - dump the on-flash index. |
1032 | * @c: UBIFS file-system description object | 1030 | * @c: UBIFS file-system description object |
1033 | * | 1031 | * |
1034 | * This function dumps whole UBIFS indexing B-tree, unlike 'dbg_dump_tnc()' | 1032 | * This function dumps whole UBIFS indexing B-tree, unlike 'ubifs_dump_tnc()' |
1035 | * which dumps only in-memory znodes and does not read znodes which from flash. | 1033 | * which dumps only in-memory znodes and does not read znodes which from flash. |
1036 | */ | 1034 | */ |
1037 | void dbg_dump_index(struct ubifs_info *c) | 1035 | void ubifs_dump_index(struct ubifs_info *c) |
1038 | { | 1036 | { |
1039 | dbg_walk_index(c, NULL, dump_znode, NULL); | 1037 | dbg_walk_index(c, NULL, dump_znode, NULL); |
1040 | } | 1038 | } |
@@ -1120,15 +1118,15 @@ int dbg_check_space_info(struct ubifs_info *c) | |||
1120 | 1118 | ||
1121 | out: | 1119 | out: |
1122 | ubifs_msg("saved lprops statistics dump"); | 1120 | ubifs_msg("saved lprops statistics dump"); |
1123 | dbg_dump_lstats(&d->saved_lst); | 1121 | ubifs_dump_lstats(&d->saved_lst); |
1124 | ubifs_msg("saved budgeting info dump"); | 1122 | ubifs_msg("saved budgeting info dump"); |
1125 | dbg_dump_budg(c, &d->saved_bi); | 1123 | ubifs_dump_budg(c, &d->saved_bi); |
1126 | ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt); | 1124 | ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt); |
1127 | ubifs_msg("current lprops statistics dump"); | 1125 | ubifs_msg("current lprops statistics dump"); |
1128 | ubifs_get_lp_stats(c, &lst); | 1126 | ubifs_get_lp_stats(c, &lst); |
1129 | dbg_dump_lstats(&lst); | 1127 | ubifs_dump_lstats(&lst); |
1130 | ubifs_msg("current budgeting info dump"); | 1128 | ubifs_msg("current budgeting info dump"); |
1131 | dbg_dump_budg(c, &c->bi); | 1129 | ubifs_dump_budg(c, &c->bi); |
1132 | dump_stack(); | 1130 | dump_stack(); |
1133 | return -EINVAL; | 1131 | return -EINVAL; |
1134 | } | 1132 | } |
@@ -1160,7 +1158,7 @@ int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode) | |||
1160 | "is clean", ui->ui_size, ui->synced_i_size); | 1158 | "is clean", ui->ui_size, ui->synced_i_size); |
1161 | ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino, | 1159 | ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino, |
1162 | inode->i_mode, i_size_read(inode)); | 1160 | inode->i_mode, i_size_read(inode)); |
1163 | dbg_dump_stack(); | 1161 | dump_stack(); |
1164 | err = -EINVAL; | 1162 | err = -EINVAL; |
1165 | } | 1163 | } |
1166 | spin_unlock(&ui->ui_lock); | 1164 | spin_unlock(&ui->ui_lock); |
@@ -1223,14 +1221,14 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) | |||
1223 | "but calculated size is %llu", dir->i_ino, | 1221 | "but calculated size is %llu", dir->i_ino, |
1224 | (unsigned long long)i_size_read(dir), | 1222 | (unsigned long long)i_size_read(dir), |
1225 | (unsigned long long)size); | 1223 | (unsigned long long)size); |
1226 | dbg_dump_inode(c, dir); | 1224 | ubifs_dump_inode(c, dir); |
1227 | dump_stack(); | 1225 | dump_stack(); |
1228 | return -EINVAL; | 1226 | return -EINVAL; |
1229 | } | 1227 | } |
1230 | if (dir->i_nlink != nlink) { | 1228 | if (dir->i_nlink != nlink) { |
1231 | ubifs_err("directory inode %lu has nlink %u, but calculated " | 1229 | ubifs_err("directory inode %lu has nlink %u, but calculated " |
1232 | "nlink is %u", dir->i_ino, dir->i_nlink, nlink); | 1230 | "nlink is %u", dir->i_ino, dir->i_nlink, nlink); |
1233 | dbg_dump_inode(c, dir); | 1231 | ubifs_dump_inode(c, dir); |
1234 | dump_stack(); | 1232 | dump_stack(); |
1235 | return -EINVAL; | 1233 | return -EINVAL; |
1236 | } | 1234 | } |
@@ -1287,25 +1285,25 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, | |||
1287 | err = 1; | 1285 | err = 1; |
1288 | key_read(c, &dent1->key, &key); | 1286 | key_read(c, &dent1->key, &key); |
1289 | if (keys_cmp(c, &zbr1->key, &key)) { | 1287 | if (keys_cmp(c, &zbr1->key, &key)) { |
1290 | dbg_err("1st entry at %d:%d has key %s", zbr1->lnum, | 1288 | ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, |
1291 | zbr1->offs, dbg_snprintf_key(c, &key, key_buf, | 1289 | zbr1->offs, dbg_snprintf_key(c, &key, key_buf, |
1292 | DBG_KEY_BUF_LEN)); | 1290 | DBG_KEY_BUF_LEN)); |
1293 | dbg_err("but it should have key %s according to tnc", | 1291 | ubifs_err("but it should have key %s according to tnc", |
1294 | dbg_snprintf_key(c, &zbr1->key, key_buf, | 1292 | dbg_snprintf_key(c, &zbr1->key, key_buf, |
1295 | DBG_KEY_BUF_LEN)); | 1293 | DBG_KEY_BUF_LEN)); |
1296 | dbg_dump_node(c, dent1); | 1294 | ubifs_dump_node(c, dent1); |
1297 | goto out_free; | 1295 | goto out_free; |
1298 | } | 1296 | } |
1299 | 1297 | ||
1300 | key_read(c, &dent2->key, &key); | 1298 | key_read(c, &dent2->key, &key); |
1301 | if (keys_cmp(c, &zbr2->key, &key)) { | 1299 | if (keys_cmp(c, &zbr2->key, &key)) { |
1302 | dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum, | 1300 | ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, |
1303 | zbr1->offs, dbg_snprintf_key(c, &key, key_buf, | 1301 | zbr1->offs, dbg_snprintf_key(c, &key, key_buf, |
1304 | DBG_KEY_BUF_LEN)); | 1302 | DBG_KEY_BUF_LEN)); |
1305 | dbg_err("but it should have key %s according to tnc", | 1303 | ubifs_err("but it should have key %s according to tnc", |
1306 | dbg_snprintf_key(c, &zbr2->key, key_buf, | 1304 | dbg_snprintf_key(c, &zbr2->key, key_buf, |
1307 | DBG_KEY_BUF_LEN)); | 1305 | DBG_KEY_BUF_LEN)); |
1308 | dbg_dump_node(c, dent2); | 1306 | ubifs_dump_node(c, dent2); |
1309 | goto out_free; | 1307 | goto out_free; |
1310 | } | 1308 | } |
1311 | 1309 | ||
@@ -1318,15 +1316,15 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, | |||
1318 | goto out_free; | 1316 | goto out_free; |
1319 | } | 1317 | } |
1320 | if (cmp == 0 && nlen1 == nlen2) | 1318 | if (cmp == 0 && nlen1 == nlen2) |
1321 | dbg_err("2 xent/dent nodes with the same name"); | 1319 | ubifs_err("2 xent/dent nodes with the same name"); |
1322 | else | 1320 | else |
1323 | dbg_err("bad order of colliding key %s", | 1321 | ubifs_err("bad order of colliding key %s", |
1324 | dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); | 1322 | dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); |
1325 | 1323 | ||
1326 | ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); | 1324 | ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); |
1327 | dbg_dump_node(c, dent1); | 1325 | ubifs_dump_node(c, dent1); |
1328 | ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); | 1326 | ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); |
1329 | dbg_dump_node(c, dent2); | 1327 | ubifs_dump_node(c, dent2); |
1330 | 1328 | ||
1331 | out_free: | 1329 | out_free: |
1332 | kfree(dent2); | 1330 | kfree(dent2); |
@@ -1529,10 +1527,10 @@ static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr) | |||
1529 | out: | 1527 | out: |
1530 | ubifs_err("failed, error %d", err); | 1528 | ubifs_err("failed, error %d", err); |
1531 | ubifs_msg("dump of the znode"); | 1529 | ubifs_msg("dump of the znode"); |
1532 | dbg_dump_znode(c, znode); | 1530 | ubifs_dump_znode(c, znode); |
1533 | if (zp) { | 1531 | if (zp) { |
1534 | ubifs_msg("dump of the parent znode"); | 1532 | ubifs_msg("dump of the parent znode"); |
1535 | dbg_dump_znode(c, zp); | 1533 | ubifs_dump_znode(c, zp); |
1536 | } | 1534 | } |
1537 | dump_stack(); | 1535 | dump_stack(); |
1538 | return -EINVAL; | 1536 | return -EINVAL; |
@@ -1599,9 +1597,9 @@ int dbg_check_tnc(struct ubifs_info *c, int extra) | |||
1599 | return err; | 1597 | return err; |
1600 | if (err) { | 1598 | if (err) { |
1601 | ubifs_msg("first znode"); | 1599 | ubifs_msg("first znode"); |
1602 | dbg_dump_znode(c, prev); | 1600 | ubifs_dump_znode(c, prev); |
1603 | ubifs_msg("second znode"); | 1601 | ubifs_msg("second znode"); |
1604 | dbg_dump_znode(c, znode); | 1602 | ubifs_dump_znode(c, znode); |
1605 | return -EINVAL; | 1603 | return -EINVAL; |
1606 | } | 1604 | } |
1607 | } | 1605 | } |
@@ -1690,7 +1688,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, | |||
1690 | if (err) { | 1688 | if (err) { |
1691 | ubifs_err("znode checking function returned " | 1689 | ubifs_err("znode checking function returned " |
1692 | "error %d", err); | 1690 | "error %d", err); |
1693 | dbg_dump_znode(c, znode); | 1691 | ubifs_dump_znode(c, znode); |
1694 | goto out_dump; | 1692 | goto out_dump; |
1695 | } | 1693 | } |
1696 | } | 1694 | } |
@@ -1758,7 +1756,7 @@ out_dump: | |||
1758 | else | 1756 | else |
1759 | zbr = &c->zroot; | 1757 | zbr = &c->zroot; |
1760 | ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs); | 1758 | ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs); |
1761 | dbg_dump_znode(c, znode); | 1759 | ubifs_dump_znode(c, znode); |
1762 | out_unlock: | 1760 | out_unlock: |
1763 | mutex_unlock(&c->tnc_mutex); | 1761 | mutex_unlock(&c->tnc_mutex); |
1764 | return err; | 1762 | return err; |
@@ -2194,7 +2192,7 @@ out: | |||
2194 | 2192 | ||
2195 | out_dump: | 2193 | out_dump: |
2196 | ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs); | 2194 | ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs); |
2197 | dbg_dump_node(c, node); | 2195 | ubifs_dump_node(c, node); |
2198 | out_free: | 2196 | out_free: |
2199 | kfree(node); | 2197 | kfree(node); |
2200 | return err; | 2198 | return err; |
@@ -2352,7 +2350,7 @@ out_dump: | |||
2352 | 2350 | ||
2353 | ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", | 2351 | ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", |
2354 | (unsigned long)fscki->inum, zbr->lnum, zbr->offs); | 2352 | (unsigned long)fscki->inum, zbr->lnum, zbr->offs); |
2355 | dbg_dump_node(c, ino); | 2353 | ubifs_dump_node(c, ino); |
2356 | kfree(ino); | 2354 | kfree(ino); |
2357 | return -EINVAL; | 2355 | return -EINVAL; |
2358 | } | 2356 | } |
@@ -2423,12 +2421,12 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) | |||
2423 | 2421 | ||
2424 | if (sa->type != UBIFS_DATA_NODE) { | 2422 | if (sa->type != UBIFS_DATA_NODE) { |
2425 | ubifs_err("bad node type %d", sa->type); | 2423 | ubifs_err("bad node type %d", sa->type); |
2426 | dbg_dump_node(c, sa->node); | 2424 | ubifs_dump_node(c, sa->node); |
2427 | return -EINVAL; | 2425 | return -EINVAL; |
2428 | } | 2426 | } |
2429 | if (sb->type != UBIFS_DATA_NODE) { | 2427 | if (sb->type != UBIFS_DATA_NODE) { |
2430 | ubifs_err("bad node type %d", sb->type); | 2428 | ubifs_err("bad node type %d", sb->type); |
2431 | dbg_dump_node(c, sb->node); | 2429 | ubifs_dump_node(c, sb->node); |
2432 | return -EINVAL; | 2430 | return -EINVAL; |
2433 | } | 2431 | } |
2434 | 2432 | ||
@@ -2459,8 +2457,8 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) | |||
2459 | return 0; | 2457 | return 0; |
2460 | 2458 | ||
2461 | error_dump: | 2459 | error_dump: |
2462 | dbg_dump_node(c, sa->node); | 2460 | ubifs_dump_node(c, sa->node); |
2463 | dbg_dump_node(c, sb->node); | 2461 | ubifs_dump_node(c, sb->node); |
2464 | return -EINVAL; | 2462 | return -EINVAL; |
2465 | } | 2463 | } |
2466 | 2464 | ||
@@ -2491,13 +2489,13 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) | |||
2491 | if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE && | 2489 | if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE && |
2492 | sa->type != UBIFS_XENT_NODE) { | 2490 | sa->type != UBIFS_XENT_NODE) { |
2493 | ubifs_err("bad node type %d", sa->type); | 2491 | ubifs_err("bad node type %d", sa->type); |
2494 | dbg_dump_node(c, sa->node); | 2492 | ubifs_dump_node(c, sa->node); |
2495 | return -EINVAL; | 2493 | return -EINVAL; |
2496 | } | 2494 | } |
2497 | if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE && | 2495 | if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE && |
2498 | sa->type != UBIFS_XENT_NODE) { | 2496 | sa->type != UBIFS_XENT_NODE) { |
2499 | ubifs_err("bad node type %d", sb->type); | 2497 | ubifs_err("bad node type %d", sb->type); |
2500 | dbg_dump_node(c, sb->node); | 2498 | ubifs_dump_node(c, sb->node); |
2501 | return -EINVAL; | 2499 | return -EINVAL; |
2502 | } | 2500 | } |
2503 | 2501 | ||
@@ -2547,9 +2545,9 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) | |||
2547 | 2545 | ||
2548 | error_dump: | 2546 | error_dump: |
2549 | ubifs_msg("dumping first node"); | 2547 | ubifs_msg("dumping first node"); |
2550 | dbg_dump_node(c, sa->node); | 2548 | ubifs_dump_node(c, sa->node); |
2551 | ubifs_msg("dumping second node"); | 2549 | ubifs_msg("dumping second node"); |
2552 | dbg_dump_node(c, sb->node); | 2550 | ubifs_dump_node(c, sb->node); |
2553 | return -EINVAL; | 2551 | return -EINVAL; |
2554 | return 0; | 2552 | return 0; |
2555 | } | 2553 | } |
@@ -2678,7 +2676,7 @@ static void cut_data(const void *buf, unsigned int len) | |||
2678 | } | 2676 | } |
2679 | 2677 | ||
2680 | int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, | 2678 | int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, |
2681 | int offs, int len, int dtype) | 2679 | int offs, int len) |
2682 | { | 2680 | { |
2683 | int err, failing; | 2681 | int err, failing; |
2684 | 2682 | ||
@@ -2688,7 +2686,7 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, | |||
2688 | failing = power_cut_emulated(c, lnum, 1); | 2686 | failing = power_cut_emulated(c, lnum, 1); |
2689 | if (failing) | 2687 | if (failing) |
2690 | cut_data(buf, len); | 2688 | cut_data(buf, len); |
2691 | err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); | 2689 | err = ubi_leb_write(c->ubi, lnum, buf, offs, len); |
2692 | if (err) | 2690 | if (err) |
2693 | return err; | 2691 | return err; |
2694 | if (failing) | 2692 | if (failing) |
@@ -2697,7 +2695,7 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, | |||
2697 | } | 2695 | } |
2698 | 2696 | ||
2699 | int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, | 2697 | int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, |
2700 | int len, int dtype) | 2698 | int len) |
2701 | { | 2699 | { |
2702 | int err; | 2700 | int err; |
2703 | 2701 | ||
@@ -2705,7 +2703,7 @@ int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, | |||
2705 | return -EROFS; | 2703 | return -EROFS; |
2706 | if (power_cut_emulated(c, lnum, 1)) | 2704 | if (power_cut_emulated(c, lnum, 1)) |
2707 | return -EROFS; | 2705 | return -EROFS; |
2708 | err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); | 2706 | err = ubi_leb_change(c->ubi, lnum, buf, len); |
2709 | if (err) | 2707 | if (err) |
2710 | return err; | 2708 | return err; |
2711 | if (power_cut_emulated(c, lnum, 1)) | 2709 | if (power_cut_emulated(c, lnum, 1)) |
@@ -2729,7 +2727,7 @@ int dbg_leb_unmap(struct ubifs_info *c, int lnum) | |||
2729 | return 0; | 2727 | return 0; |
2730 | } | 2728 | } |
2731 | 2729 | ||
2732 | int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype) | 2730 | int dbg_leb_map(struct ubifs_info *c, int lnum) |
2733 | { | 2731 | { |
2734 | int err; | 2732 | int err; |
2735 | 2733 | ||
@@ -2737,7 +2735,7 @@ int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype) | |||
2737 | return -EROFS; | 2735 | return -EROFS; |
2738 | if (power_cut_emulated(c, lnum, 0)) | 2736 | if (power_cut_emulated(c, lnum, 0)) |
2739 | return -EROFS; | 2737 | return -EROFS; |
2740 | err = ubi_leb_map(c->ubi, lnum, dtype); | 2738 | err = ubi_leb_map(c->ubi, lnum); |
2741 | if (err) | 2739 | if (err) |
2742 | return err; | 2740 | return err; |
2743 | if (power_cut_emulated(c, lnum, 0)) | 2741 | if (power_cut_emulated(c, lnum, 0)) |
@@ -2857,16 +2855,16 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u, | |||
2857 | * 'ubifs-debug' file-system instead. | 2855 | * 'ubifs-debug' file-system instead. |
2858 | */ | 2856 | */ |
2859 | if (file->f_path.dentry == d->dfs_dump_lprops) { | 2857 | if (file->f_path.dentry == d->dfs_dump_lprops) { |
2860 | dbg_dump_lprops(c); | 2858 | ubifs_dump_lprops(c); |
2861 | return count; | 2859 | return count; |
2862 | } | 2860 | } |
2863 | if (file->f_path.dentry == d->dfs_dump_budg) { | 2861 | if (file->f_path.dentry == d->dfs_dump_budg) { |
2864 | dbg_dump_budg(c, &c->bi); | 2862 | ubifs_dump_budg(c, &c->bi); |
2865 | return count; | 2863 | return count; |
2866 | } | 2864 | } |
2867 | if (file->f_path.dentry == d->dfs_dump_tnc) { | 2865 | if (file->f_path.dentry == d->dfs_dump_tnc) { |
2868 | mutex_lock(&c->tnc_mutex); | 2866 | mutex_lock(&c->tnc_mutex); |
2869 | dbg_dump_tnc(c); | 2867 | ubifs_dump_tnc(c); |
2870 | mutex_unlock(&c->tnc_mutex); | 2868 | mutex_unlock(&c->tnc_mutex); |
2871 | return count; | 2869 | return count; |
2872 | } | 2870 | } |
@@ -3189,5 +3187,3 @@ void ubifs_debugging_exit(struct ubifs_info *c) | |||
3189 | { | 3187 | { |
3190 | kfree(c->dbg); | 3188 | kfree(c->dbg); |
3191 | } | 3189 | } |
3192 | |||
3193 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 9f717655df18..486a8e024fb6 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h | |||
@@ -29,8 +29,6 @@ typedef int (*dbg_leaf_callback)(struct ubifs_info *c, | |||
29 | typedef int (*dbg_znode_callback)(struct ubifs_info *c, | 29 | typedef int (*dbg_znode_callback)(struct ubifs_info *c, |
30 | struct ubifs_znode *znode, void *priv); | 30 | struct ubifs_znode *znode, void *priv); |
31 | 31 | ||
32 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
33 | |||
34 | /* | 32 | /* |
35 | * The UBIFS debugfs directory name pattern and maximum name length (3 for "ubi" | 33 | * The UBIFS debugfs directory name pattern and maximum name length (3 for "ubi" |
36 | * + 1 for "_", plus 2x2 for 2 UBI numbers and 1 for the trailing zero byte). | 34 | * + 1 for "_", plus 2x2 for 2 UBI numbers and 1 for the trailing zero byte). |
@@ -149,7 +147,7 @@ struct ubifs_global_debug_info { | |||
149 | if (unlikely(!(expr))) { \ | 147 | if (unlikely(!(expr))) { \ |
150 | printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \ | 148 | printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \ |
151 | __func__, __LINE__, current->pid); \ | 149 | __func__, __LINE__, current->pid); \ |
152 | dbg_dump_stack(); \ | 150 | dump_stack(); \ |
153 | } \ | 151 | } \ |
154 | } while (0) | 152 | } while (0) |
155 | 153 | ||
@@ -161,12 +159,6 @@ struct ubifs_global_debug_info { | |||
161 | } \ | 159 | } \ |
162 | } while (0) | 160 | } while (0) |
163 | 161 | ||
164 | #define dbg_dump_stack() dump_stack() | ||
165 | |||
166 | #define dbg_err(fmt, ...) do { \ | ||
167 | ubifs_err(fmt, ##__VA_ARGS__); \ | ||
168 | } while (0) | ||
169 | |||
170 | #define ubifs_dbg_msg(type, fmt, ...) \ | 162 | #define ubifs_dbg_msg(type, fmt, ...) \ |
171 | pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__) | 163 | pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__) |
172 | 164 | ||
@@ -257,27 +249,27 @@ const char *dbg_get_key_dump(const struct ubifs_info *c, | |||
257 | const union ubifs_key *key); | 249 | const union ubifs_key *key); |
258 | const char *dbg_snprintf_key(const struct ubifs_info *c, | 250 | const char *dbg_snprintf_key(const struct ubifs_info *c, |
259 | const union ubifs_key *key, char *buffer, int len); | 251 | const union ubifs_key *key, char *buffer, int len); |
260 | void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode); | 252 | void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode); |
261 | void dbg_dump_node(const struct ubifs_info *c, const void *node); | 253 | void ubifs_dump_node(const struct ubifs_info *c, const void *node); |
262 | void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum, | 254 | void ubifs_dump_budget_req(const struct ubifs_budget_req *req); |
263 | int offs); | 255 | void ubifs_dump_lstats(const struct ubifs_lp_stats *lst); |
264 | void dbg_dump_budget_req(const struct ubifs_budget_req *req); | 256 | void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi); |
265 | void dbg_dump_lstats(const struct ubifs_lp_stats *lst); | 257 | void ubifs_dump_lprop(const struct ubifs_info *c, |
266 | void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi); | 258 | const struct ubifs_lprops *lp); |
267 | void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp); | 259 | void ubifs_dump_lprops(struct ubifs_info *c); |
268 | void dbg_dump_lprops(struct ubifs_info *c); | 260 | void ubifs_dump_lpt_info(struct ubifs_info *c); |
269 | void dbg_dump_lpt_info(struct ubifs_info *c); | 261 | void ubifs_dump_leb(const struct ubifs_info *c, int lnum); |
270 | void dbg_dump_leb(const struct ubifs_info *c, int lnum); | 262 | void ubifs_dump_sleb(const struct ubifs_info *c, |
271 | void dbg_dump_sleb(const struct ubifs_info *c, | 263 | const struct ubifs_scan_leb *sleb, int offs); |
272 | const struct ubifs_scan_leb *sleb, int offs); | 264 | void ubifs_dump_znode(const struct ubifs_info *c, |
273 | void dbg_dump_znode(const struct ubifs_info *c, | 265 | const struct ubifs_znode *znode); |
274 | const struct ubifs_znode *znode); | 266 | void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, |
275 | void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat); | 267 | int cat); |
276 | void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | 268 | void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, |
277 | struct ubifs_nnode *parent, int iip); | 269 | struct ubifs_nnode *parent, int iip); |
278 | void dbg_dump_tnc(struct ubifs_info *c); | 270 | void ubifs_dump_tnc(struct ubifs_info *c); |
279 | void dbg_dump_index(struct ubifs_info *c); | 271 | void ubifs_dump_index(struct ubifs_info *c); |
280 | void dbg_dump_lpt_lebs(const struct ubifs_info *c); | 272 | void ubifs_dump_lpt_lebs(const struct ubifs_info *c); |
281 | 273 | ||
282 | int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, | 274 | int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, |
283 | dbg_znode_callback znode_cb, void *priv); | 275 | dbg_znode_callback znode_cb, void *priv); |
@@ -307,11 +299,10 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head); | |||
307 | int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); | 299 | int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); |
308 | 300 | ||
309 | int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, | 301 | int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, |
310 | int len, int dtype); | 302 | int len); |
311 | int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, | 303 | int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len); |
312 | int dtype); | ||
313 | int dbg_leb_unmap(struct ubifs_info *c, int lnum); | 304 | int dbg_leb_unmap(struct ubifs_info *c, int lnum); |
314 | int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype); | 305 | int dbg_leb_map(struct ubifs_info *c, int lnum); |
315 | 306 | ||
316 | /* Debugfs-related stuff */ | 307 | /* Debugfs-related stuff */ |
317 | int dbg_debugfs_init(void); | 308 | int dbg_debugfs_init(void); |
@@ -319,162 +310,4 @@ void dbg_debugfs_exit(void); | |||
319 | int dbg_debugfs_init_fs(struct ubifs_info *c); | 310 | int dbg_debugfs_init_fs(struct ubifs_info *c); |
320 | void dbg_debugfs_exit_fs(struct ubifs_info *c); | 311 | void dbg_debugfs_exit_fs(struct ubifs_info *c); |
321 | 312 | ||
322 | #else /* !CONFIG_UBIFS_FS_DEBUG */ | ||
323 | |||
324 | /* Use "if (0)" to make compiler check arguments even if debugging is off */ | ||
325 | #define ubifs_assert(expr) do { \ | ||
326 | if (0) \ | ||
327 | printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \ | ||
328 | __func__, __LINE__, current->pid); \ | ||
329 | } while (0) | ||
330 | |||
331 | #define dbg_err(fmt, ...) do { \ | ||
332 | if (0) \ | ||
333 | ubifs_err(fmt, ##__VA_ARGS__); \ | ||
334 | } while (0) | ||
335 | |||
336 | #define DBGKEY(key) ((char *)(key)) | ||
337 | #define DBGKEY1(key) ((char *)(key)) | ||
338 | |||
339 | #define ubifs_dbg_msg(fmt, ...) do { \ | ||
340 | if (0) \ | ||
341 | printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \ | ||
342 | } while (0) | ||
343 | |||
344 | #define dbg_dump_stack() | ||
345 | #define ubifs_assert_cmt_locked(c) | ||
346 | |||
347 | #define dbg_msg(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
348 | #define dbg_gen(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
349 | #define dbg_jnl(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
350 | #define dbg_jnlk(key, fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
351 | #define dbg_tnc(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
352 | #define dbg_tnck(key, fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
353 | #define dbg_lp(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
354 | #define dbg_find(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
355 | #define dbg_mnt(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
356 | #define dbg_mntk(key, fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
357 | #define dbg_io(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
358 | #define dbg_cmt(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
359 | #define dbg_budg(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
360 | #define dbg_log(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
361 | #define dbg_gc(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
362 | #define dbg_scan(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
363 | #define dbg_rcvry(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) | ||
364 | |||
365 | static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; } | ||
366 | static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; } | ||
367 | static inline const char *dbg_ntype(int type) { return ""; } | ||
368 | static inline const char *dbg_cstate(int cmt_state) { return ""; } | ||
369 | static inline const char *dbg_jhead(int jhead) { return ""; } | ||
370 | static inline const char * | ||
371 | dbg_get_key_dump(const struct ubifs_info *c, | ||
372 | const union ubifs_key *key) { return ""; } | ||
373 | static inline const char * | ||
374 | dbg_snprintf_key(const struct ubifs_info *c, | ||
375 | const union ubifs_key *key, char *buffer, | ||
376 | int len) { return ""; } | ||
377 | static inline void dbg_dump_inode(struct ubifs_info *c, | ||
378 | const struct inode *inode) { return; } | ||
379 | static inline void dbg_dump_node(const struct ubifs_info *c, | ||
380 | const void *node) { return; } | ||
381 | static inline void dbg_dump_lpt_node(const struct ubifs_info *c, | ||
382 | void *node, int lnum, | ||
383 | int offs) { return; } | ||
384 | static inline void | ||
385 | dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; } | ||
386 | static inline void | ||
387 | dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; } | ||
388 | static inline void | ||
389 | dbg_dump_budg(struct ubifs_info *c, | ||
390 | const struct ubifs_budg_info *bi) { return; } | ||
391 | static inline void dbg_dump_lprop(const struct ubifs_info *c, | ||
392 | const struct ubifs_lprops *lp) { return; } | ||
393 | static inline void dbg_dump_lprops(struct ubifs_info *c) { return; } | ||
394 | static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; } | ||
395 | static inline void dbg_dump_leb(const struct ubifs_info *c, | ||
396 | int lnum) { return; } | ||
397 | static inline void | ||
398 | dbg_dump_sleb(const struct ubifs_info *c, | ||
399 | const struct ubifs_scan_leb *sleb, int offs) { return; } | ||
400 | static inline void | ||
401 | dbg_dump_znode(const struct ubifs_info *c, | ||
402 | const struct ubifs_znode *znode) { return; } | ||
403 | static inline void dbg_dump_heap(struct ubifs_info *c, | ||
404 | struct ubifs_lpt_heap *heap, | ||
405 | int cat) { return; } | ||
406 | static inline void dbg_dump_pnode(struct ubifs_info *c, | ||
407 | struct ubifs_pnode *pnode, | ||
408 | struct ubifs_nnode *parent, | ||
409 | int iip) { return; } | ||
410 | static inline void dbg_dump_tnc(struct ubifs_info *c) { return; } | ||
411 | static inline void dbg_dump_index(struct ubifs_info *c) { return; } | ||
412 | static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; } | ||
413 | |||
414 | static inline int dbg_walk_index(struct ubifs_info *c, | ||
415 | dbg_leaf_callback leaf_cb, | ||
416 | dbg_znode_callback znode_cb, | ||
417 | void *priv) { return 0; } | ||
418 | static inline void dbg_save_space_info(struct ubifs_info *c) { return; } | ||
419 | static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; } | ||
420 | static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; } | ||
421 | static inline int | ||
422 | dbg_old_index_check_init(struct ubifs_info *c, | ||
423 | struct ubifs_zbranch *zroot) { return 0; } | ||
424 | static inline int | ||
425 | dbg_check_old_index(struct ubifs_info *c, | ||
426 | struct ubifs_zbranch *zroot) { return 0; } | ||
427 | static inline int dbg_check_cats(struct ubifs_info *c) { return 0; } | ||
428 | static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; } | ||
429 | static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; } | ||
430 | static inline int dbg_chk_lpt_sz(struct ubifs_info *c, | ||
431 | int action, int len) { return 0; } | ||
432 | static inline int | ||
433 | dbg_check_synced_i_size(const struct ubifs_info *c, | ||
434 | struct inode *inode) { return 0; } | ||
435 | static inline int dbg_check_dir(struct ubifs_info *c, | ||
436 | const struct inode *dir) { return 0; } | ||
437 | static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; } | ||
438 | static inline int dbg_check_idx_size(struct ubifs_info *c, | ||
439 | long long idx_size) { return 0; } | ||
440 | static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; } | ||
441 | static inline void dbg_check_heap(struct ubifs_info *c, | ||
442 | struct ubifs_lpt_heap *heap, | ||
443 | int cat, int add_pos) { return; } | ||
444 | static inline int dbg_check_lpt_nodes(struct ubifs_info *c, | ||
445 | struct ubifs_cnode *cnode, int row, int col) { return 0; } | ||
446 | static inline int dbg_check_inode_size(struct ubifs_info *c, | ||
447 | const struct inode *inode, | ||
448 | loff_t size) { return 0; } | ||
449 | static inline int | ||
450 | dbg_check_data_nodes_order(struct ubifs_info *c, | ||
451 | struct list_head *head) { return 0; } | ||
452 | static inline int | ||
453 | dbg_check_nondata_nodes_order(struct ubifs_info *c, | ||
454 | struct list_head *head) { return 0; } | ||
455 | |||
456 | static inline int dbg_leb_write(struct ubifs_info *c, int lnum, | ||
457 | const void *buf, int offset, | ||
458 | int len, int dtype) { return 0; } | ||
459 | static inline int dbg_leb_change(struct ubifs_info *c, int lnum, | ||
460 | const void *buf, int len, | ||
461 | int dtype) { return 0; } | ||
462 | static inline int dbg_leb_unmap(struct ubifs_info *c, int lnum) { return 0; } | ||
463 | static inline int dbg_leb_map(struct ubifs_info *c, int lnum, | ||
464 | int dtype) { return 0; } | ||
465 | |||
466 | static inline int dbg_is_chk_gen(const struct ubifs_info *c) { return 0; } | ||
467 | static inline int dbg_is_chk_index(const struct ubifs_info *c) { return 0; } | ||
468 | static inline int dbg_is_chk_orph(const struct ubifs_info *c) { return 0; } | ||
469 | static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { return 0; } | ||
470 | static inline int dbg_is_chk_fs(const struct ubifs_info *c) { return 0; } | ||
471 | static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { return 0; } | ||
472 | static inline int dbg_is_power_cut(const struct ubifs_info *c) { return 0; } | ||
473 | |||
474 | static inline int dbg_debugfs_init(void) { return 0; } | ||
475 | static inline void dbg_debugfs_exit(void) { return; } | ||
476 | static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; } | ||
477 | static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; } | ||
478 | |||
479 | #endif /* !CONFIG_UBIFS_FS_DEBUG */ | ||
480 | #endif /* !__UBIFS_DEBUG_H__ */ | 313 | #endif /* !__UBIFS_DEBUG_H__ */ |
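After this renaming, the dump helpers live in the ubifs_ namespace and are declared unconditionally, so any error path can call them directly. A typical, purely illustrative error path under the new names (all three helpers are the ones declared above; the wrapper function itself is not from the patch):

	/* Dump a suspicious node plus the current budgeting state, then bail. */
	static int report_bad_node(struct ubifs_info *c, const void *node)
	{
		ubifs_err("bad node");		/* was often dbg_err() */
		ubifs_dump_node(c, node);	/* was dbg_dump_node() */
		ubifs_dump_budg(c, &c->bi);	/* was dbg_dump_budg() */
		dump_stack();			/* dbg_dump_stack() wrapper is gone */
		return -EINVAL;
	}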
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ec9f1870ab7f..62a2727f4ecf 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
@@ -170,8 +170,6 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, | |||
170 | return inode; | 170 | return inode; |
171 | } | 171 | } |
172 | 172 | ||
173 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
174 | |||
175 | static int dbg_check_name(const struct ubifs_info *c, | 173 | static int dbg_check_name(const struct ubifs_info *c, |
176 | const struct ubifs_dent_node *dent, | 174 | const struct ubifs_dent_node *dent, |
177 | const struct qstr *nm) | 175 | const struct qstr *nm) |
@@ -185,12 +183,6 @@ static int dbg_check_name(const struct ubifs_info *c, | |||
185 | return 0; | 183 | return 0; |
186 | } | 184 | } |
187 | 185 | ||
188 | #else | ||
189 | |||
190 | #define dbg_check_name(c, dent, nm) 0 | ||
191 | |||
192 | #endif | ||
193 | |||
194 | static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, | 186 | static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, |
195 | struct nameidata *nd) | 187 | struct nameidata *nd) |
196 | { | 188 | { |
@@ -1187,12 +1179,10 @@ const struct inode_operations ubifs_dir_inode_operations = { | |||
1187 | .rename = ubifs_rename, | 1179 | .rename = ubifs_rename, |
1188 | .setattr = ubifs_setattr, | 1180 | .setattr = ubifs_setattr, |
1189 | .getattr = ubifs_getattr, | 1181 | .getattr = ubifs_getattr, |
1190 | #ifdef CONFIG_UBIFS_FS_XATTR | ||
1191 | .setxattr = ubifs_setxattr, | 1182 | .setxattr = ubifs_setxattr, |
1192 | .getxattr = ubifs_getxattr, | 1183 | .getxattr = ubifs_getxattr, |
1193 | .listxattr = ubifs_listxattr, | 1184 | .listxattr = ubifs_listxattr, |
1194 | .removexattr = ubifs_removexattr, | 1185 | .removexattr = ubifs_removexattr, |
1195 | #endif | ||
1196 | }; | 1186 | }; |
1197 | 1187 | ||
1198 | const struct file_operations ubifs_dir_operations = { | 1188 | const struct file_operations ubifs_dir_operations = { |
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 5c8f6dc1d28b..35389ca2d267 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
@@ -97,7 +97,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, | |||
97 | dump: | 97 | dump: |
98 | ubifs_err("bad data node (block %u, inode %lu)", | 98 | ubifs_err("bad data node (block %u, inode %lu)", |
99 | block, inode->i_ino); | 99 | block, inode->i_ino); |
100 | dbg_dump_node(c, dn); | 100 | ubifs_dump_node(c, dn); |
101 | return -EINVAL; | 101 | return -EINVAL; |
102 | } | 102 | } |
103 | 103 | ||
@@ -1562,12 +1562,10 @@ const struct address_space_operations ubifs_file_address_operations = { | |||
1562 | const struct inode_operations ubifs_file_inode_operations = { | 1562 | const struct inode_operations ubifs_file_inode_operations = { |
1563 | .setattr = ubifs_setattr, | 1563 | .setattr = ubifs_setattr, |
1564 | .getattr = ubifs_getattr, | 1564 | .getattr = ubifs_getattr, |
1565 | #ifdef CONFIG_UBIFS_FS_XATTR | ||
1566 | .setxattr = ubifs_setxattr, | 1565 | .setxattr = ubifs_setxattr, |
1567 | .getxattr = ubifs_getxattr, | 1566 | .getxattr = ubifs_getxattr, |
1568 | .listxattr = ubifs_listxattr, | 1567 | .listxattr = ubifs_listxattr, |
1569 | .removexattr = ubifs_removexattr, | 1568 | .removexattr = ubifs_removexattr, |
1570 | #endif | ||
1571 | }; | 1569 | }; |
1572 | 1570 | ||
1573 | const struct inode_operations ubifs_symlink_inode_operations = { | 1571 | const struct inode_operations ubifs_symlink_inode_operations = { |
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index ded29f6224c2..04dd6f47635e 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c | |||
@@ -109,7 +109,7 @@ static int switch_gc_head(struct ubifs_info *c) | |||
109 | return err; | 109 | return err; |
110 | 110 | ||
111 | c->gc_lnum = -1; | 111 | c->gc_lnum = -1; |
112 | err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0, UBI_LONGTERM); | 112 | err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0); |
113 | return err; | 113 | return err; |
114 | } | 114 | } |
115 | 115 | ||
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 9228950a658f..e18b9889a51b 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c | |||
@@ -109,13 +109,13 @@ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, | |||
109 | if (err && (err != -EBADMSG || even_ebadmsg)) { | 109 | if (err && (err != -EBADMSG || even_ebadmsg)) { |
110 | ubifs_err("reading %d bytes from LEB %d:%d failed, error %d", | 110 | ubifs_err("reading %d bytes from LEB %d:%d failed, error %d", |
111 | len, lnum, offs, err); | 111 | len, lnum, offs, err); |
112 | dbg_dump_stack(); | 112 | dump_stack(); |
113 | } | 113 | } |
114 | return err; | 114 | return err; |
115 | } | 115 | } |
116 | 116 | ||
117 | int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, | 117 | int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, |
118 | int len, int dtype) | 118 | int len) |
119 | { | 119 | { |
120 | int err; | 120 | int err; |
121 | 121 | ||
@@ -123,20 +123,19 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, | |||
123 | if (c->ro_error) | 123 | if (c->ro_error) |
124 | return -EROFS; | 124 | return -EROFS; |
125 | if (!dbg_is_tst_rcvry(c)) | 125 | if (!dbg_is_tst_rcvry(c)) |
126 | err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); | 126 | err = ubi_leb_write(c->ubi, lnum, buf, offs, len); |
127 | else | 127 | else |
128 | err = dbg_leb_write(c, lnum, buf, offs, len, dtype); | 128 | err = dbg_leb_write(c, lnum, buf, offs, len); |
129 | if (err) { | 129 | if (err) { |
130 | ubifs_err("writing %d bytes to LEB %d:%d failed, error %d", | 130 | ubifs_err("writing %d bytes to LEB %d:%d failed, error %d", |
131 | len, lnum, offs, err); | 131 | len, lnum, offs, err); |
132 | ubifs_ro_mode(c, err); | 132 | ubifs_ro_mode(c, err); |
133 | dbg_dump_stack(); | 133 | dump_stack(); |
134 | } | 134 | } |
135 | return err; | 135 | return err; |
136 | } | 136 | } |
137 | 137 | ||
138 | int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, | 138 | int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) |
139 | int dtype) | ||
140 | { | 139 | { |
141 | int err; | 140 | int err; |
142 | 141 | ||
@@ -144,14 +143,14 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, | |||
144 | if (c->ro_error) | 143 | if (c->ro_error) |
145 | return -EROFS; | 144 | return -EROFS; |
146 | if (!dbg_is_tst_rcvry(c)) | 145 | if (!dbg_is_tst_rcvry(c)) |
147 | err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); | 146 | err = ubi_leb_change(c->ubi, lnum, buf, len); |
148 | else | 147 | else |
149 | err = dbg_leb_change(c, lnum, buf, len, dtype); | 148 | err = dbg_leb_change(c, lnum, buf, len); |
150 | if (err) { | 149 | if (err) { |
151 | ubifs_err("changing %d bytes in LEB %d failed, error %d", | 150 | ubifs_err("changing %d bytes in LEB %d failed, error %d", |
152 | len, lnum, err); | 151 | len, lnum, err); |
153 | ubifs_ro_mode(c, err); | 152 | ubifs_ro_mode(c, err); |
154 | dbg_dump_stack(); | 153 | dump_stack(); |
155 | } | 154 | } |
156 | return err; | 155 | return err; |
157 | } | 156 | } |
@@ -170,12 +169,12 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum) | |||
170 | if (err) { | 169 | if (err) { |
171 | ubifs_err("unmap LEB %d failed, error %d", lnum, err); | 170 | ubifs_err("unmap LEB %d failed, error %d", lnum, err); |
172 | ubifs_ro_mode(c, err); | 171 | ubifs_ro_mode(c, err); |
173 | dbg_dump_stack(); | 172 | dump_stack(); |
174 | } | 173 | } |
175 | return err; | 174 | return err; |
176 | } | 175 | } |
177 | 176 | ||
178 | int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype) | 177 | int ubifs_leb_map(struct ubifs_info *c, int lnum) |
179 | { | 178 | { |
180 | int err; | 179 | int err; |
181 | 180 | ||
@@ -183,13 +182,13 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype) | |||
183 | if (c->ro_error) | 182 | if (c->ro_error) |
184 | return -EROFS; | 183 | return -EROFS; |
185 | if (!dbg_is_tst_rcvry(c)) | 184 | if (!dbg_is_tst_rcvry(c)) |
186 | err = ubi_leb_map(c->ubi, lnum, dtype); | 185 | err = ubi_leb_map(c->ubi, lnum); |
187 | else | 186 | else |
188 | err = dbg_leb_map(c, lnum, dtype); | 187 | err = dbg_leb_map(c, lnum); |
189 | if (err) { | 188 | if (err) { |
190 | ubifs_err("mapping LEB %d failed, error %d", lnum, err); | 189 | ubifs_err("mapping LEB %d failed, error %d", lnum, err); |
191 | ubifs_ro_mode(c, err); | 190 | ubifs_ro_mode(c, err); |
192 | dbg_dump_stack(); | 191 | dump_stack(); |
193 | } | 192 | } |
194 | return err; | 193 | return err; |
195 | } | 194 | } |
@@ -202,7 +201,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) | |||
202 | if (err < 0) { | 201 | if (err < 0) { |
203 | ubifs_err("ubi_is_mapped failed for LEB %d, error %d", | 202 | ubifs_err("ubi_is_mapped failed for LEB %d, error %d", |
204 | lnum, err); | 203 | lnum, err); |
205 | dbg_dump_stack(); | 204 | dump_stack(); |
206 | } | 205 | } |
207 | return err; | 206 | return err; |
208 | } | 207 | } |
@@ -294,8 +293,8 @@ out_len: | |||
294 | out: | 293 | out: |
295 | if (!quiet) { | 294 | if (!quiet) { |
296 | ubifs_err("bad node at LEB %d:%d", lnum, offs); | 295 | ubifs_err("bad node at LEB %d:%d", lnum, offs); |
297 | dbg_dump_node(c, buf); | 296 | ubifs_dump_node(c, buf); |
298 | dbg_dump_stack(); | 297 | dump_stack(); |
299 | } | 298 | } |
300 | return err; | 299 | return err; |
301 | } | 300 | } |
@@ -523,8 +522,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) | |||
523 | dirt = sync_len - wbuf->used; | 522 | dirt = sync_len - wbuf->used; |
524 | if (dirt) | 523 | if (dirt) |
525 | ubifs_pad(c, wbuf->buf + wbuf->used, dirt); | 524 | ubifs_pad(c, wbuf->buf + wbuf->used, dirt); |
526 | err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len, | 525 | err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len); |
527 | wbuf->dtype); | ||
528 | if (err) | 526 | if (err) |
529 | return err; | 527 | return err; |
530 | 528 | ||
@@ -562,14 +560,12 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) | |||
562 | * @wbuf: write-buffer | 560 | * @wbuf: write-buffer |
563 | * @lnum: logical eraseblock number to seek to | 561 | * @lnum: logical eraseblock number to seek to |
564 | * @offs: logical eraseblock offset to seek to | 562 | * @offs: logical eraseblock offset to seek to |
565 | * @dtype: data type | ||
566 | * | 563 | * |
567 | * This function targets the write-buffer to logical eraseblock @lnum:@offs. | 564 | * This function targets the write-buffer to logical eraseblock @lnum:@offs. |
568 | * The write-buffer has to be empty. Returns zero in case of success and a | 565 | * The write-buffer has to be empty. Returns zero in case of success and a |
569 | * negative error code in case of failure. | 566 | * negative error code in case of failure. |
570 | */ | 567 | */ |
571 | int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, | 568 | int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs) |
572 | int dtype) | ||
573 | { | 569 | { |
574 | const struct ubifs_info *c = wbuf->c; | 570 | const struct ubifs_info *c = wbuf->c; |
575 | 571 | ||
@@ -592,7 +588,6 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, | |||
592 | wbuf->avail = wbuf->size; | 588 | wbuf->avail = wbuf->size; |
593 | wbuf->used = 0; | 589 | wbuf->used = 0; |
594 | spin_unlock(&wbuf->lock); | 590 | spin_unlock(&wbuf->lock); |
595 | wbuf->dtype = dtype; | ||
596 | 591 | ||
597 | return 0; | 592 | return 0; |
598 | } | 593 | } |
@@ -719,8 +714,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) | |||
719 | dbg_io("flush jhead %s wbuf to LEB %d:%d", | 714 | dbg_io("flush jhead %s wbuf to LEB %d:%d", |
720 | dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); | 715 | dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); |
721 | err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, | 716 | err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, |
722 | wbuf->offs, wbuf->size, | 717 | wbuf->offs, wbuf->size); |
723 | wbuf->dtype); | ||
724 | if (err) | 718 | if (err) |
725 | goto out; | 719 | goto out; |
726 | 720 | ||
@@ -756,7 +750,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) | |||
756 | dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); | 750 | dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); |
757 | memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); | 751 | memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); |
758 | err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, | 752 | err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, |
759 | wbuf->size, wbuf->dtype); | 753 | wbuf->size); |
760 | if (err) | 754 | if (err) |
761 | goto out; | 755 | goto out; |
762 | 756 | ||
@@ -775,7 +769,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) | |||
775 | dbg_io("write %d bytes to LEB %d:%d", | 769 | dbg_io("write %d bytes to LEB %d:%d", |
776 | wbuf->size, wbuf->lnum, wbuf->offs); | 770 | wbuf->size, wbuf->lnum, wbuf->offs); |
777 | err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs, | 771 | err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs, |
778 | wbuf->size, wbuf->dtype); | 772 | wbuf->size); |
779 | if (err) | 773 | if (err) |
780 | goto out; | 774 | goto out; |
781 | 775 | ||
@@ -797,7 +791,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) | |||
797 | dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, | 791 | dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, |
798 | wbuf->offs); | 792 | wbuf->offs); |
799 | err = ubifs_leb_write(c, wbuf->lnum, buf + written, | 793 | err = ubifs_leb_write(c, wbuf->lnum, buf + written, |
800 | wbuf->offs, n, wbuf->dtype); | 794 | wbuf->offs, n); |
801 | if (err) | 795 | if (err) |
802 | goto out; | 796 | goto out; |
803 | wbuf->offs += n; | 797 | wbuf->offs += n; |
@@ -841,9 +835,9 @@ exit: | |||
841 | out: | 835 | out: |
842 | ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", | 836 | ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", |
843 | len, wbuf->lnum, wbuf->offs, err); | 837 | len, wbuf->lnum, wbuf->offs, err); |
844 | dbg_dump_node(c, buf); | 838 | ubifs_dump_node(c, buf); |
845 | dbg_dump_stack(); | 839 | dump_stack(); |
846 | dbg_dump_leb(c, wbuf->lnum); | 840 | ubifs_dump_leb(c, wbuf->lnum); |
847 | return err; | 841 | return err; |
848 | } | 842 | } |
849 | 843 | ||
@@ -854,7 +848,6 @@ out: | |||
854 | * @len: node length | 848 | * @len: node length |
855 | * @lnum: logical eraseblock number | 849 | * @lnum: logical eraseblock number |
856 | * @offs: offset within the logical eraseblock | 850 | * @offs: offset within the logical eraseblock |
857 | * @dtype: node life-time hint (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN) | ||
858 | * | 851 | * |
859 | * This function automatically fills node magic number, assigns sequence | 852 | * This function automatically fills node magic number, assigns sequence |
860 | * number, and calculates node CRC checksum. The length of the @buf buffer has | 853 | * number, and calculates node CRC checksum. The length of the @buf buffer has |
@@ -863,7 +856,7 @@ out: | |||
863 | * success and a negative error code in case of failure. | 856 | * success and a negative error code in case of failure. |
864 | */ | 857 | */ |
865 | int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, | 858 | int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, |
866 | int offs, int dtype) | 859 | int offs) |
867 | { | 860 | { |
868 | int err, buf_len = ALIGN(len, c->min_io_size); | 861 | int err, buf_len = ALIGN(len, c->min_io_size); |
869 | 862 | ||
@@ -879,9 +872,9 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, | |||
879 | return -EROFS; | 872 | return -EROFS; |
880 | 873 | ||
881 | ubifs_prepare_node(c, buf, len, 1); | 874 | ubifs_prepare_node(c, buf, len, 1); |
882 | err = ubifs_leb_write(c, lnum, buf, offs, buf_len, dtype); | 875 | err = ubifs_leb_write(c, lnum, buf, offs, buf_len); |
883 | if (err) | 876 | if (err) |
884 | dbg_dump_node(c, buf); | 877 | ubifs_dump_node(c, buf); |
885 | 878 | ||
886 | return err; | 879 | return err; |
887 | } | 880 | } |
@@ -960,8 +953,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, | |||
960 | 953 | ||
961 | out: | 954 | out: |
962 | ubifs_err("bad node at LEB %d:%d", lnum, offs); | 955 | ubifs_err("bad node at LEB %d:%d", lnum, offs); |
963 | dbg_dump_node(c, buf); | 956 | ubifs_dump_node(c, buf); |
964 | dbg_dump_stack(); | 957 | dump_stack(); |
965 | return -EINVAL; | 958 | return -EINVAL; |
966 | } | 959 | } |
967 | 960 | ||
@@ -1017,8 +1010,8 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, | |||
1017 | out: | 1010 | out: |
1018 | ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs, | 1011 | ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs, |
1019 | ubi_is_mapped(c->ubi, lnum)); | 1012 | ubi_is_mapped(c->ubi, lnum)); |
1020 | dbg_dump_node(c, buf); | 1013 | ubifs_dump_node(c, buf); |
1021 | dbg_dump_stack(); | 1014 | dump_stack(); |
1022 | return -EINVAL; | 1015 | return -EINVAL; |
1023 | } | 1016 | } |
1024 | 1017 | ||
@@ -1056,7 +1049,6 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) | |||
1056 | */ | 1049 | */ |
1057 | size = c->max_write_size - (c->leb_start % c->max_write_size); | 1050 | size = c->max_write_size - (c->leb_start % c->max_write_size); |
1058 | wbuf->avail = wbuf->size = size; | 1051 | wbuf->avail = wbuf->size = size; |
1059 | wbuf->dtype = UBI_UNKNOWN; | ||
1060 | wbuf->sync_callback = NULL; | 1052 | wbuf->sync_callback = NULL; |
1061 | mutex_init(&wbuf->io_mutex); | 1053 | mutex_init(&wbuf->io_mutex); |
1062 | spin_lock_init(&wbuf->lock); | 1054 | spin_lock_init(&wbuf->lock); |
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 2f438ab2e7a2..12c0f154ca83 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c | |||
@@ -214,7 +214,7 @@ out: | |||
214 | err = ubifs_add_bud_to_log(c, jhead, lnum, offs); | 214 | err = ubifs_add_bud_to_log(c, jhead, lnum, offs); |
215 | if (err) | 215 | if (err) |
216 | goto out_return; | 216 | goto out_return; |
217 | err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype); | 217 | err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs); |
218 | if (err) | 218 | if (err) |
219 | goto out_unlock; | 219 | goto out_unlock; |
220 | 220 | ||
@@ -385,9 +385,9 @@ out: | |||
385 | if (err == -ENOSPC) { | 385 | if (err == -ENOSPC) { |
386 | /* These are some budgeting problems, print useful information */ | 386 | /* These are some budgeting problems, print useful information */ |
387 | down_write(&c->commit_sem); | 387 | down_write(&c->commit_sem); |
388 | dbg_dump_stack(); | 388 | dump_stack(); |
389 | dbg_dump_budg(c, &c->bi); | 389 | ubifs_dump_budg(c, &c->bi); |
390 | dbg_dump_lprops(c); | 390 | ubifs_dump_lprops(c); |
391 | cmt_retries = dbg_check_lprops(c); | 391 | cmt_retries = dbg_check_lprops(c); |
392 | up_write(&c->commit_sem); | 392 | up_write(&c->commit_sem); |
393 | } | 393 | } |
@@ -1267,7 +1267,6 @@ out_free: | |||
1267 | return err; | 1267 | return err; |
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | #ifdef CONFIG_UBIFS_FS_XATTR | ||
1271 | 1270 | ||
1272 | /** | 1271 | /** |
1273 | * ubifs_jnl_delete_xattr - delete an extended attribute. | 1272 | * ubifs_jnl_delete_xattr - delete an extended attribute. |
@@ -1462,4 +1461,3 @@ out_free: | |||
1462 | return err; | 1461 | return err; |
1463 | } | 1462 | } |
1464 | 1463 | ||
1465 | #endif /* CONFIG_UBIFS_FS_XATTR */ | ||
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index f9fd068d1ae0..c80b15d6c8de 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c | |||
@@ -29,11 +29,7 @@ | |||
29 | 29 | ||
30 | #include "ubifs.h" | 30 | #include "ubifs.h" |
31 | 31 | ||
32 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
33 | static int dbg_check_bud_bytes(struct ubifs_info *c); | 32 | static int dbg_check_bud_bytes(struct ubifs_info *c); |
34 | #else | ||
35 | #define dbg_check_bud_bytes(c) 0 | ||
36 | #endif | ||
37 | 33 | ||
38 | /** | 34 | /** |
39 | * ubifs_search_bud - search bud LEB. | 35 | * ubifs_search_bud - search bud LEB. |
@@ -262,7 +258,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) | |||
262 | * an unclean reboot, because the target LEB might have been | 258 | * an unclean reboot, because the target LEB might have been |
263 | * unmapped, but not yet physically erased. | 259 | * unmapped, but not yet physically erased. |
264 | */ | 260 | */ |
265 | err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM); | 261 | err = ubifs_leb_map(c, bud->lnum); |
266 | if (err) | 262 | if (err) |
267 | goto out_unlock; | 263 | goto out_unlock; |
268 | } | 264 | } |
@@ -270,7 +266,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) | |||
270 | dbg_log("write ref LEB %d:%d", | 266 | dbg_log("write ref LEB %d:%d", |
271 | c->lhead_lnum, c->lhead_offs); | 267 | c->lhead_lnum, c->lhead_offs); |
272 | err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum, | 268 | err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum, |
273 | c->lhead_offs, UBI_SHORTTERM); | 269 | c->lhead_offs); |
274 | if (err) | 270 | if (err) |
275 | goto out_unlock; | 271 | goto out_unlock; |
276 | 272 | ||
@@ -422,7 +418,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) | |||
422 | 418 | ||
423 | len = ALIGN(len, c->min_io_size); | 419 | len = ALIGN(len, c->min_io_size); |
424 | dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len); | 420 | dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len); |
425 | err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len, UBI_SHORTTERM); | 421 | err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len); |
426 | if (err) | 422 | if (err) |
427 | goto out; | 423 | goto out; |
428 | 424 | ||
@@ -623,7 +619,7 @@ static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs, | |||
623 | int sz = ALIGN(*offs, c->min_io_size), err; | 619 | int sz = ALIGN(*offs, c->min_io_size), err; |
624 | 620 | ||
625 | ubifs_pad(c, buf + *offs, sz - *offs); | 621 | ubifs_pad(c, buf + *offs, sz - *offs); |
626 | err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM); | 622 | err = ubifs_leb_change(c, *lnum, buf, sz); |
627 | if (err) | 623 | if (err) |
628 | return err; | 624 | return err; |
629 | *lnum = ubifs_next_log_lnum(c, *lnum); | 625 | *lnum = ubifs_next_log_lnum(c, *lnum); |
@@ -702,7 +698,7 @@ int ubifs_consolidate_log(struct ubifs_info *c) | |||
702 | int sz = ALIGN(offs, c->min_io_size); | 698 | int sz = ALIGN(offs, c->min_io_size); |
703 | 699 | ||
704 | ubifs_pad(c, buf + offs, sz - offs); | 700 | ubifs_pad(c, buf + offs, sz - offs); |
705 | err = ubifs_leb_change(c, write_lnum, buf, sz, UBI_SHORTTERM); | 701 | err = ubifs_leb_change(c, write_lnum, buf, sz); |
706 | if (err) | 702 | if (err) |
707 | goto out_free; | 703 | goto out_free; |
708 | offs = ALIGN(offs, c->min_io_size); | 704 | offs = ALIGN(offs, c->min_io_size); |
@@ -734,8 +730,6 @@ out_free: | |||
734 | return err; | 730 | return err; |
735 | } | 731 | } |
736 | 732 | ||
737 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
738 | |||
739 | /** | 733 | /** |
740 | * dbg_check_bud_bytes - make sure bud bytes calculation are all right. | 734 | * dbg_check_bud_bytes - make sure bud bytes calculation are all right. |
741 | * @c: UBIFS file-system description object | 735 | * @c: UBIFS file-system description object |
@@ -767,5 +761,3 @@ static int dbg_check_bud_bytes(struct ubifs_info *c) | |||
767 | 761 | ||
768 | return err; | 762 | return err; |
769 | } | 763 | } |
770 | |||
771 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index f8a181e647cc..86eb8e533249 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c | |||
@@ -447,7 +447,7 @@ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops) | |||
447 | int new_cat = ubifs_categorize_lprops(c, lprops); | 447 | int new_cat = ubifs_categorize_lprops(c, lprops); |
448 | 448 | ||
449 | if (old_cat == new_cat) { | 449 | if (old_cat == new_cat) { |
450 | struct ubifs_lpt_heap *heap = &c->lpt_heap[new_cat - 1]; | 450 | struct ubifs_lpt_heap *heap; |
451 | 451 | ||
452 | /* lprops on a heap now must be moved up or down */ | 452 | /* lprops on a heap now must be moved up or down */ |
453 | if (new_cat < 1 || new_cat > LPROPS_HEAP_CNT) | 453 | if (new_cat < 1 || new_cat > LPROPS_HEAP_CNT) |
@@ -846,7 +846,9 @@ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c) | |||
846 | return lprops; | 846 | return lprops; |
847 | } | 847 | } |
848 | 848 | ||
849 | #ifdef CONFIG_UBIFS_FS_DEBUG | 849 | /* |
850 | * Everything below is related to debugging. | ||
851 | */ | ||
850 | 852 | ||
851 | /** | 853 | /** |
852 | * dbg_check_cats - check category heaps and lists. | 854 | * dbg_check_cats - check category heaps and lists. |
@@ -1001,8 +1003,8 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, | |||
1001 | out: | 1003 | out: |
1002 | if (err) { | 1004 | if (err) { |
1003 | dbg_msg("failed cat %d hpos %d err %d", cat, i, err); | 1005 | dbg_msg("failed cat %d hpos %d err %d", cat, i, err); |
1004 | dbg_dump_stack(); | 1006 | dump_stack(); |
1005 | dbg_dump_heap(c, heap, cat); | 1007 | ubifs_dump_heap(c, heap, cat); |
1006 | } | 1008 | } |
1007 | } | 1009 | } |
1008 | 1010 | ||
@@ -1109,8 +1111,8 @@ static int scan_check_cb(struct ubifs_info *c, | |||
1109 | if (IS_ERR(sleb)) { | 1111 | if (IS_ERR(sleb)) { |
1110 | ret = PTR_ERR(sleb); | 1112 | ret = PTR_ERR(sleb); |
1111 | if (ret == -EUCLEAN) { | 1113 | if (ret == -EUCLEAN) { |
1112 | dbg_dump_lprops(c); | 1114 | ubifs_dump_lprops(c); |
1113 | dbg_dump_budg(c, &c->bi); | 1115 | ubifs_dump_budg(c, &c->bi); |
1114 | } | 1116 | } |
1115 | goto out; | 1117 | goto out; |
1116 | } | 1118 | } |
@@ -1237,7 +1239,7 @@ out_print: | |||
1237 | ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, " | 1239 | ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, " |
1238 | "should be free %d, dirty %d", | 1240 | "should be free %d, dirty %d", |
1239 | lnum, lp->free, lp->dirty, lp->flags, free, dirty); | 1241 | lnum, lp->free, lp->dirty, lp->flags, free, dirty); |
1240 | dbg_dump_leb(c, lnum); | 1242 | ubifs_dump_leb(c, lnum); |
1241 | out_destroy: | 1243 | out_destroy: |
1242 | ubifs_scan_destroy(sleb); | 1244 | ubifs_scan_destroy(sleb); |
1243 | ret = -EINVAL; | 1245 | ret = -EINVAL; |
@@ -1315,5 +1317,3 @@ int dbg_check_lprops(struct ubifs_info *c) | |||
1315 | out: | 1317 | out: |
1316 | return err; | 1318 | return err; |
1317 | } | 1319 | } |
1318 | |||
1319 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 66d59d0a1402..ce33b2beb151 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c | |||
@@ -701,8 +701,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, | |||
701 | alen = ALIGN(len, c->min_io_size); | 701 | alen = ALIGN(len, c->min_io_size); |
702 | set_ltab(c, lnum, c->leb_size - alen, alen - len); | 702 | set_ltab(c, lnum, c->leb_size - alen, alen - len); |
703 | memset(p, 0xff, alen - len); | 703 | memset(p, 0xff, alen - len); |
704 | err = ubifs_leb_change(c, lnum++, buf, alen, | 704 | err = ubifs_leb_change(c, lnum++, buf, alen); |
705 | UBI_SHORTTERM); | ||
706 | if (err) | 705 | if (err) |
707 | goto out; | 706 | goto out; |
708 | p = buf; | 707 | p = buf; |
@@ -732,8 +731,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, | |||
732 | set_ltab(c, lnum, c->leb_size - alen, | 731 | set_ltab(c, lnum, c->leb_size - alen, |
733 | alen - len); | 732 | alen - len); |
734 | memset(p, 0xff, alen - len); | 733 | memset(p, 0xff, alen - len); |
735 | err = ubifs_leb_change(c, lnum++, buf, alen, | 734 | err = ubifs_leb_change(c, lnum++, buf, alen); |
736 | UBI_SHORTTERM); | ||
737 | if (err) | 735 | if (err) |
738 | goto out; | 736 | goto out; |
739 | p = buf; | 737 | p = buf; |
@@ -780,8 +778,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, | |||
780 | alen = ALIGN(len, c->min_io_size); | 778 | alen = ALIGN(len, c->min_io_size); |
781 | set_ltab(c, lnum, c->leb_size - alen, alen - len); | 779 | set_ltab(c, lnum, c->leb_size - alen, alen - len); |
782 | memset(p, 0xff, alen - len); | 780 | memset(p, 0xff, alen - len); |
783 | err = ubifs_leb_change(c, lnum++, buf, alen, | 781 | err = ubifs_leb_change(c, lnum++, buf, alen); |
784 | UBI_SHORTTERM); | ||
785 | if (err) | 782 | if (err) |
786 | goto out; | 783 | goto out; |
787 | p = buf; | 784 | p = buf; |
@@ -806,7 +803,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, | |||
806 | alen = ALIGN(len, c->min_io_size); | 803 | alen = ALIGN(len, c->min_io_size); |
807 | set_ltab(c, lnum, c->leb_size - alen, alen - len); | 804 | set_ltab(c, lnum, c->leb_size - alen, alen - len); |
808 | memset(p, 0xff, alen - len); | 805 | memset(p, 0xff, alen - len); |
809 | err = ubifs_leb_change(c, lnum++, buf, alen, UBI_SHORTTERM); | 806 | err = ubifs_leb_change(c, lnum++, buf, alen); |
810 | if (err) | 807 | if (err) |
811 | goto out; | 808 | goto out; |
812 | p = buf; | 809 | p = buf; |
@@ -826,7 +823,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, | |||
826 | 823 | ||
827 | /* Write remaining buffer */ | 824 | /* Write remaining buffer */ |
828 | memset(p, 0xff, alen - len); | 825 | memset(p, 0xff, alen - len); |
829 | err = ubifs_leb_change(c, lnum, buf, alen, UBI_SHORTTERM); | 826 | err = ubifs_leb_change(c, lnum, buf, alen); |
830 | if (err) | 827 | if (err) |
831 | goto out; | 828 | goto out; |
832 | 829 | ||
@@ -926,7 +923,7 @@ static int check_lpt_crc(void *buf, int len) | |||
926 | if (crc != calc_crc) { | 923 | if (crc != calc_crc) { |
927 | ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc, | 924 | ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc, |
928 | calc_crc); | 925 | calc_crc); |
929 | dbg_dump_stack(); | 926 | dump_stack(); |
930 | return -EINVAL; | 927 | return -EINVAL; |
931 | } | 928 | } |
932 | return 0; | 929 | return 0; |
@@ -949,7 +946,7 @@ static int check_lpt_type(uint8_t **addr, int *pos, int type) | |||
949 | if (node_type != type) { | 946 | if (node_type != type) { |
950 | ubifs_err("invalid type (%d) in LPT node type %d", node_type, | 947 | ubifs_err("invalid type (%d) in LPT node type %d", node_type, |
951 | type); | 948 | type); |
952 | dbg_dump_stack(); | 949 | dump_stack(); |
953 | return -EINVAL; | 950 | return -EINVAL; |
954 | } | 951 | } |
955 | return 0; | 952 | return 0; |
@@ -1247,7 +1244,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) | |||
1247 | 1244 | ||
1248 | out: | 1245 | out: |
1249 | ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs); | 1246 | ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs); |
1250 | dbg_dump_stack(); | 1247 | dump_stack(); |
1251 | kfree(nnode); | 1248 | kfree(nnode); |
1252 | return err; | 1249 | return err; |
1253 | } | 1250 | } |
@@ -1312,8 +1309,8 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) | |||
1312 | 1309 | ||
1313 | out: | 1310 | out: |
1314 | ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs); | 1311 | ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs); |
1315 | dbg_dump_pnode(c, pnode, parent, iip); | 1312 | ubifs_dump_pnode(c, pnode, parent, iip); |
1316 | dbg_dump_stack(); | 1313 | dump_stack(); |
1317 | dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip)); | 1314 | dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip)); |
1318 | kfree(pnode); | 1315 | kfree(pnode); |
1319 | return err; | 1316 | return err; |
@@ -1740,16 +1737,20 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr) | |||
1740 | if (rd) { | 1737 | if (rd) { |
1741 | err = lpt_init_rd(c); | 1738 | err = lpt_init_rd(c); |
1742 | if (err) | 1739 | if (err) |
1743 | return err; | 1740 | goto out_err; |
1744 | } | 1741 | } |
1745 | 1742 | ||
1746 | if (wr) { | 1743 | if (wr) { |
1747 | err = lpt_init_wr(c); | 1744 | err = lpt_init_wr(c); |
1748 | if (err) | 1745 | if (err) |
1749 | return err; | 1746 | goto out_err; |
1750 | } | 1747 | } |
1751 | 1748 | ||
1752 | return 0; | 1749 | return 0; |
1750 | |||
1751 | out_err: | ||
1752 | ubifs_lpt_free(c, 0); | ||
1753 | return err; | ||
1753 | } | 1754 | } |
1754 | 1755 | ||
1755 | /** | 1756 | /** |
@@ -2080,8 +2081,6 @@ out: | |||
2080 | return err; | 2081 | return err; |
2081 | } | 2082 | } |
2082 | 2083 | ||
2083 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
2084 | |||
2085 | /** | 2084 | /** |
2086 | * dbg_chk_pnode - check a pnode. | 2085 | * dbg_chk_pnode - check a pnode. |
2087 | * @c: the UBIFS file-system description object | 2086 | * @c: the UBIFS file-system description object |
@@ -2096,8 +2095,8 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
2096 | int i; | 2095 | int i; |
2097 | 2096 | ||
2098 | if (pnode->num != col) { | 2097 | if (pnode->num != col) { |
2099 | dbg_err("pnode num %d expected %d parent num %d iip %d", | 2098 | ubifs_err("pnode num %d expected %d parent num %d iip %d", |
2100 | pnode->num, col, pnode->parent->num, pnode->iip); | 2099 | pnode->num, col, pnode->parent->num, pnode->iip); |
2101 | return -EINVAL; | 2100 | return -EINVAL; |
2102 | } | 2101 | } |
2103 | for (i = 0; i < UBIFS_LPT_FANOUT; i++) { | 2102 | for (i = 0; i < UBIFS_LPT_FANOUT; i++) { |
@@ -2111,14 +2110,14 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
2111 | if (lnum >= c->leb_cnt) | 2110 | if (lnum >= c->leb_cnt) |
2112 | continue; | 2111 | continue; |
2113 | if (lprops->lnum != lnum) { | 2112 | if (lprops->lnum != lnum) { |
2114 | dbg_err("bad LEB number %d expected %d", | 2113 | ubifs_err("bad LEB number %d expected %d", |
2115 | lprops->lnum, lnum); | 2114 | lprops->lnum, lnum); |
2116 | return -EINVAL; | 2115 | return -EINVAL; |
2117 | } | 2116 | } |
2118 | if (lprops->flags & LPROPS_TAKEN) { | 2117 | if (lprops->flags & LPROPS_TAKEN) { |
2119 | if (cat != LPROPS_UNCAT) { | 2118 | if (cat != LPROPS_UNCAT) { |
2120 | dbg_err("LEB %d taken but not uncat %d", | 2119 | ubifs_err("LEB %d taken but not uncat %d", |
2121 | lprops->lnum, cat); | 2120 | lprops->lnum, cat); |
2122 | return -EINVAL; | 2121 | return -EINVAL; |
2123 | } | 2122 | } |
2124 | continue; | 2123 | continue; |
@@ -2130,8 +2129,8 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
2130 | case LPROPS_FRDI_IDX: | 2129 | case LPROPS_FRDI_IDX: |
2131 | break; | 2130 | break; |
2132 | default: | 2131 | default: |
2133 | dbg_err("LEB %d index but cat %d", | 2132 | ubifs_err("LEB %d index but cat %d", |
2134 | lprops->lnum, cat); | 2133 | lprops->lnum, cat); |
2135 | return -EINVAL; | 2134 | return -EINVAL; |
2136 | } | 2135 | } |
2137 | } else { | 2136 | } else { |
@@ -2143,8 +2142,8 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
2143 | case LPROPS_FREEABLE: | 2142 | case LPROPS_FREEABLE: |
2144 | break; | 2143 | break; |
2145 | default: | 2144 | default: |
2146 | dbg_err("LEB %d not index but cat %d", | 2145 | ubifs_err("LEB %d not index but cat %d", |
2147 | lprops->lnum, cat); | 2146 | lprops->lnum, cat); |
2148 | return -EINVAL; | 2147 | return -EINVAL; |
2149 | } | 2148 | } |
2150 | } | 2149 | } |
@@ -2184,24 +2183,24 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, | |||
2184 | break; | 2183 | break; |
2185 | } | 2184 | } |
2186 | if (!found) { | 2185 | if (!found) { |
2187 | dbg_err("LEB %d cat %d not found in cat heap/list", | 2186 | ubifs_err("LEB %d cat %d not found in cat heap/list", |
2188 | lprops->lnum, cat); | 2187 | lprops->lnum, cat); |
2189 | return -EINVAL; | 2188 | return -EINVAL; |
2190 | } | 2189 | } |
2191 | switch (cat) { | 2190 | switch (cat) { |
2192 | case LPROPS_EMPTY: | 2191 | case LPROPS_EMPTY: |
2193 | if (lprops->free != c->leb_size) { | 2192 | if (lprops->free != c->leb_size) { |
2194 | dbg_err("LEB %d cat %d free %d dirty %d", | 2193 | ubifs_err("LEB %d cat %d free %d dirty %d", |
2195 | lprops->lnum, cat, lprops->free, | 2194 | lprops->lnum, cat, lprops->free, |
2196 | lprops->dirty); | 2195 | lprops->dirty); |
2197 | return -EINVAL; | 2196 | return -EINVAL; |
2198 | } | 2197 | } |
2199 | case LPROPS_FREEABLE: | 2198 | case LPROPS_FREEABLE: |
2200 | case LPROPS_FRDI_IDX: | 2199 | case LPROPS_FRDI_IDX: |
2201 | if (lprops->free + lprops->dirty != c->leb_size) { | 2200 | if (lprops->free + lprops->dirty != c->leb_size) { |
2202 | dbg_err("LEB %d cat %d free %d dirty %d", | 2201 | ubifs_err("LEB %d cat %d free %d dirty %d", |
2203 | lprops->lnum, cat, lprops->free, | 2202 | lprops->lnum, cat, lprops->free, |
2204 | lprops->dirty); | 2203 | lprops->dirty); |
2205 | return -EINVAL; | 2204 | return -EINVAL; |
2206 | } | 2205 | } |
2207 | } | 2206 | } |
@@ -2235,9 +2234,10 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, | |||
2235 | /* cnode is a nnode */ | 2234 | /* cnode is a nnode */ |
2236 | num = calc_nnode_num(row, col); | 2235 | num = calc_nnode_num(row, col); |
2237 | if (cnode->num != num) { | 2236 | if (cnode->num != num) { |
2238 | dbg_err("nnode num %d expected %d " | 2237 | ubifs_err("nnode num %d expected %d " |
2239 | "parent num %d iip %d", cnode->num, num, | 2238 | "parent num %d iip %d", |
2240 | (nnode ? nnode->num : 0), cnode->iip); | 2239 | cnode->num, num, |
2240 | (nnode ? nnode->num : 0), cnode->iip); | ||
2241 | return -EINVAL; | 2241 | return -EINVAL; |
2242 | } | 2242 | } |
2243 | nn = (struct ubifs_nnode *)cnode; | 2243 | nn = (struct ubifs_nnode *)cnode; |
@@ -2274,5 +2274,3 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, | |||
2274 | } | 2274 | } |
2275 | return 0; | 2275 | return 0; |
2276 | } | 2276 | } |
2277 | |||
2278 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index cddd6bd214f4..4fa70734e6e7 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c | |||
@@ -30,11 +30,7 @@ | |||
30 | #include <linux/random.h> | 30 | #include <linux/random.h> |
31 | #include "ubifs.h" | 31 | #include "ubifs.h" |
32 | 32 | ||
33 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
34 | static int dbg_populate_lsave(struct ubifs_info *c); | 33 | static int dbg_populate_lsave(struct ubifs_info *c); |
35 | #else | ||
36 | #define dbg_populate_lsave(c) 0 | ||
37 | #endif | ||
38 | 34 | ||
39 | /** | 35 | /** |
40 | * first_dirty_cnode - find first dirty cnode. | 36 | * first_dirty_cnode - find first dirty cnode. |
@@ -324,11 +320,10 @@ static int layout_cnodes(struct ubifs_info *c) | |||
324 | return 0; | 320 | return 0; |
325 | 321 | ||
326 | no_space: | 322 | no_space: |
327 | ubifs_err("LPT out of space"); | 323 | ubifs_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " |
328 | dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " | 324 | "done_lsave %d", lnum, offs, len, done_ltab, done_lsave); |
329 | "done_lsave %d", lnum, offs, len, done_ltab, done_lsave); | 325 | ubifs_dump_lpt_info(c); |
330 | dbg_dump_lpt_info(c); | 326 | ubifs_dump_lpt_lebs(c); |
331 | dbg_dump_lpt_lebs(c); | ||
332 | dump_stack(); | 327 | dump_stack(); |
333 | return err; | 328 | return err; |
334 | } | 329 | } |
@@ -421,7 +416,7 @@ static int write_cnodes(struct ubifs_info *c) | |||
421 | alen = ALIGN(wlen, c->min_io_size); | 416 | alen = ALIGN(wlen, c->min_io_size); |
422 | memset(buf + offs, 0xff, alen - wlen); | 417 | memset(buf + offs, 0xff, alen - wlen); |
423 | err = ubifs_leb_write(c, lnum, buf + from, from, | 418 | err = ubifs_leb_write(c, lnum, buf + from, from, |
424 | alen, UBI_SHORTTERM); | 419 | alen); |
425 | if (err) | 420 | if (err) |
426 | return err; | 421 | return err; |
427 | } | 422 | } |
@@ -479,8 +474,7 @@ static int write_cnodes(struct ubifs_info *c) | |||
479 | wlen = offs - from; | 474 | wlen = offs - from; |
480 | alen = ALIGN(wlen, c->min_io_size); | 475 | alen = ALIGN(wlen, c->min_io_size); |
481 | memset(buf + offs, 0xff, alen - wlen); | 476 | memset(buf + offs, 0xff, alen - wlen); |
482 | err = ubifs_leb_write(c, lnum, buf + from, from, alen, | 477 | err = ubifs_leb_write(c, lnum, buf + from, from, alen); |
483 | UBI_SHORTTERM); | ||
484 | if (err) | 478 | if (err) |
485 | return err; | 479 | return err; |
486 | dbg_chk_lpt_sz(c, 2, c->leb_size - offs); | 480 | dbg_chk_lpt_sz(c, 2, c->leb_size - offs); |
@@ -506,8 +500,7 @@ static int write_cnodes(struct ubifs_info *c) | |||
506 | wlen = offs - from; | 500 | wlen = offs - from; |
507 | alen = ALIGN(wlen, c->min_io_size); | 501 | alen = ALIGN(wlen, c->min_io_size); |
508 | memset(buf + offs, 0xff, alen - wlen); | 502 | memset(buf + offs, 0xff, alen - wlen); |
509 | err = ubifs_leb_write(c, lnum, buf + from, from, alen, | 503 | err = ubifs_leb_write(c, lnum, buf + from, from, alen); |
510 | UBI_SHORTTERM); | ||
511 | if (err) | 504 | if (err) |
512 | return err; | 505 | return err; |
513 | dbg_chk_lpt_sz(c, 2, c->leb_size - offs); | 506 | dbg_chk_lpt_sz(c, 2, c->leb_size - offs); |
@@ -531,7 +524,7 @@ static int write_cnodes(struct ubifs_info *c) | |||
531 | wlen = offs - from; | 524 | wlen = offs - from; |
532 | alen = ALIGN(wlen, c->min_io_size); | 525 | alen = ALIGN(wlen, c->min_io_size); |
533 | memset(buf + offs, 0xff, alen - wlen); | 526 | memset(buf + offs, 0xff, alen - wlen); |
534 | err = ubifs_leb_write(c, lnum, buf + from, from, alen, UBI_SHORTTERM); | 527 | err = ubifs_leb_write(c, lnum, buf + from, from, alen); |
535 | if (err) | 528 | if (err) |
536 | return err; | 529 | return err; |
537 | 530 | ||
@@ -552,11 +545,10 @@ static int write_cnodes(struct ubifs_info *c) | |||
552 | return 0; | 545 | return 0; |
553 | 546 | ||
554 | no_space: | 547 | no_space: |
555 | ubifs_err("LPT out of space mismatch"); | 548 | ubifs_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " |
556 | dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " | 549 | "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); |
557 | "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); | 550 | ubifs_dump_lpt_info(c); |
558 | dbg_dump_lpt_info(c); | 551 | ubifs_dump_lpt_lebs(c); |
559 | dbg_dump_lpt_lebs(c); | ||
560 | dump_stack(); | 552 | dump_stack(); |
561 | return err; | 553 | return err; |
562 | } | 554 | } |
@@ -1497,7 +1489,9 @@ void ubifs_lpt_free(struct ubifs_info *c, int wr_only) | |||
1497 | kfree(c->lpt_nod_buf); | 1489 | kfree(c->lpt_nod_buf); |
1498 | } | 1490 | } |
1499 | 1491 | ||
1500 | #ifdef CONFIG_UBIFS_FS_DEBUG | 1492 | /* |
1493 | * Everything below is related to debugging. | ||
1494 | */ | ||
1501 | 1495 | ||
1502 | /** | 1496 | /** |
1503 | * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes. | 1497 | * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes. |
@@ -1735,7 +1729,7 @@ int dbg_check_ltab(struct ubifs_info *c) | |||
1735 | for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) { | 1729 | for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) { |
1736 | err = dbg_check_ltab_lnum(c, lnum); | 1730 | err = dbg_check_ltab_lnum(c, lnum); |
1737 | if (err) { | 1731 | if (err) { |
1738 | dbg_err("failed at LEB %d", lnum); | 1732 | ubifs_err("failed at LEB %d", lnum); |
1739 | return err; | 1733 | return err; |
1740 | } | 1734 | } |
1741 | } | 1735 | } |
@@ -1767,10 +1761,10 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) | |||
1767 | free += c->leb_size; | 1761 | free += c->leb_size; |
1768 | } | 1762 | } |
1769 | if (free < c->lpt_sz) { | 1763 | if (free < c->lpt_sz) { |
1770 | dbg_err("LPT space error: free %lld lpt_sz %lld", | 1764 | ubifs_err("LPT space error: free %lld lpt_sz %lld", |
1771 | free, c->lpt_sz); | 1765 | free, c->lpt_sz); |
1772 | dbg_dump_lpt_info(c); | 1766 | ubifs_dump_lpt_info(c); |
1773 | dbg_dump_lpt_lebs(c); | 1767 | ubifs_dump_lpt_lebs(c); |
1774 | dump_stack(); | 1768 | dump_stack(); |
1775 | return -EINVAL; | 1769 | return -EINVAL; |
1776 | } | 1770 | } |
@@ -1807,13 +1801,13 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) | |||
1807 | d->chk_lpt_lebs = 0; | 1801 | d->chk_lpt_lebs = 0; |
1808 | d->chk_lpt_wastage = 0; | 1802 | d->chk_lpt_wastage = 0; |
1809 | if (c->dirty_pn_cnt > c->pnode_cnt) { | 1803 | if (c->dirty_pn_cnt > c->pnode_cnt) { |
1810 | dbg_err("dirty pnodes %d exceed max %d", | 1804 | ubifs_err("dirty pnodes %d exceed max %d", |
1811 | c->dirty_pn_cnt, c->pnode_cnt); | 1805 | c->dirty_pn_cnt, c->pnode_cnt); |
1812 | err = -EINVAL; | 1806 | err = -EINVAL; |
1813 | } | 1807 | } |
1814 | if (c->dirty_nn_cnt > c->nnode_cnt) { | 1808 | if (c->dirty_nn_cnt > c->nnode_cnt) { |
1815 | dbg_err("dirty nnodes %d exceed max %d", | 1809 | ubifs_err("dirty nnodes %d exceed max %d", |
1816 | c->dirty_nn_cnt, c->nnode_cnt); | 1810 | c->dirty_nn_cnt, c->nnode_cnt); |
1817 | err = -EINVAL; | 1811 | err = -EINVAL; |
1818 | } | 1812 | } |
1819 | return err; | 1813 | return err; |
@@ -1830,23 +1824,23 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) | |||
1830 | chk_lpt_sz *= d->chk_lpt_lebs; | 1824 | chk_lpt_sz *= d->chk_lpt_lebs; |
1831 | chk_lpt_sz += len - c->nhead_offs; | 1825 | chk_lpt_sz += len - c->nhead_offs; |
1832 | if (d->chk_lpt_sz != chk_lpt_sz) { | 1826 | if (d->chk_lpt_sz != chk_lpt_sz) { |
1833 | dbg_err("LPT wrote %lld but space used was %lld", | 1827 | ubifs_err("LPT wrote %lld but space used was %lld", |
1834 | d->chk_lpt_sz, chk_lpt_sz); | 1828 | d->chk_lpt_sz, chk_lpt_sz); |
1835 | err = -EINVAL; | 1829 | err = -EINVAL; |
1836 | } | 1830 | } |
1837 | if (d->chk_lpt_sz > c->lpt_sz) { | 1831 | if (d->chk_lpt_sz > c->lpt_sz) { |
1838 | dbg_err("LPT wrote %lld but lpt_sz is %lld", | 1832 | ubifs_err("LPT wrote %lld but lpt_sz is %lld", |
1839 | d->chk_lpt_sz, c->lpt_sz); | 1833 | d->chk_lpt_sz, c->lpt_sz); |
1840 | err = -EINVAL; | 1834 | err = -EINVAL; |
1841 | } | 1835 | } |
1842 | if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) { | 1836 | if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) { |
1843 | dbg_err("LPT layout size %lld but wrote %lld", | 1837 | ubifs_err("LPT layout size %lld but wrote %lld", |
1844 | d->chk_lpt_sz, d->chk_lpt_sz2); | 1838 | d->chk_lpt_sz, d->chk_lpt_sz2); |
1845 | err = -EINVAL; | 1839 | err = -EINVAL; |
1846 | } | 1840 | } |
1847 | if (d->chk_lpt_sz2 && d->new_nhead_offs != len) { | 1841 | if (d->chk_lpt_sz2 && d->new_nhead_offs != len) { |
1848 | dbg_err("LPT new nhead offs: expected %d was %d", | 1842 | ubifs_err("LPT new nhead offs: expected %d was %d", |
1849 | d->new_nhead_offs, len); | 1843 | d->new_nhead_offs, len); |
1850 | err = -EINVAL; | 1844 | err = -EINVAL; |
1851 | } | 1845 | } |
1852 | lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; | 1846 | lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; |
@@ -1855,13 +1849,13 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) | |||
1855 | if (c->big_lpt) | 1849 | if (c->big_lpt) |
1856 | lpt_sz += c->lsave_sz; | 1850 | lpt_sz += c->lsave_sz; |
1857 | if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) { | 1851 | if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) { |
1858 | dbg_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld", | 1852 | ubifs_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld", |
1859 | d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); | 1853 | d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); |
1860 | err = -EINVAL; | 1854 | err = -EINVAL; |
1861 | } | 1855 | } |
1862 | if (err) { | 1856 | if (err) { |
1863 | dbg_dump_lpt_info(c); | 1857 | ubifs_dump_lpt_info(c); |
1864 | dbg_dump_lpt_lebs(c); | 1858 | ubifs_dump_lpt_lebs(c); |
1865 | dump_stack(); | 1859 | dump_stack(); |
1866 | } | 1860 | } |
1867 | d->chk_lpt_sz2 = d->chk_lpt_sz; | 1861 | d->chk_lpt_sz2 = d->chk_lpt_sz; |
@@ -1880,7 +1874,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) | |||
1880 | } | 1874 | } |
1881 | 1875 | ||
1882 | /** | 1876 | /** |
1883 | * dbg_dump_lpt_leb - dump an LPT LEB. | 1877 | * ubifs_dump_lpt_leb - dump an LPT LEB. |
1884 | * @c: UBIFS file-system description object | 1878 | * @c: UBIFS file-system description object |
1885 | * @lnum: LEB number to dump | 1879 | * @lnum: LEB number to dump |
1886 | * | 1880 | * |
@@ -1986,13 +1980,13 @@ out: | |||
1986 | } | 1980 | } |
1987 | 1981 | ||
1988 | /** | 1982 | /** |
1989 | * dbg_dump_lpt_lebs - dump LPT lebs. | 1983 | * ubifs_dump_lpt_lebs - dump LPT lebs. |
1990 | * @c: UBIFS file-system description object | 1984 | * @c: UBIFS file-system description object |
1991 | * | 1985 | * |
1992 | * This function dumps all LPT LEBs. The caller has to make sure the LPT is | 1986 | * This function dumps all LPT LEBs. The caller has to make sure the LPT is |
1993 | * locked. | 1987 | * locked. |
1994 | */ | 1988 | */ |
1995 | void dbg_dump_lpt_lebs(const struct ubifs_info *c) | 1989 | void ubifs_dump_lpt_lebs(const struct ubifs_info *c) |
1996 | { | 1990 | { |
1997 | int i; | 1991 | int i; |
1998 | 1992 | ||
@@ -2046,5 +2040,3 @@ static int dbg_populate_lsave(struct ubifs_info *c) | |||
2046 | 2040 | ||
2047 | return 1; | 2041 | return 1; |
2048 | } | 2042 | } |
2049 | |||
2050 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index 278c2382e8c2..ab83ace9910a 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c | |||
@@ -241,7 +241,7 @@ static int validate_master(const struct ubifs_info *c) | |||
241 | 241 | ||
242 | out: | 242 | out: |
243 | ubifs_err("bad master node at offset %d error %d", c->mst_offs, err); | 243 | ubifs_err("bad master node at offset %d error %d", c->mst_offs, err); |
244 | dbg_dump_node(c, c->mst_node); | 244 | ubifs_dump_node(c, c->mst_node); |
245 | return -EINVAL; | 245 | return -EINVAL; |
246 | } | 246 | } |
247 | 247 | ||
@@ -317,7 +317,7 @@ int ubifs_read_master(struct ubifs_info *c) | |||
317 | if (c->leb_cnt < old_leb_cnt || | 317 | if (c->leb_cnt < old_leb_cnt || |
318 | c->leb_cnt < UBIFS_MIN_LEB_CNT) { | 318 | c->leb_cnt < UBIFS_MIN_LEB_CNT) { |
319 | ubifs_err("bad leb_cnt on master node"); | 319 | ubifs_err("bad leb_cnt on master node"); |
320 | dbg_dump_node(c, c->mst_node); | 320 | ubifs_dump_node(c, c->mst_node); |
321 | return -EINVAL; | 321 | return -EINVAL; |
322 | } | 322 | } |
323 | 323 | ||
@@ -379,7 +379,7 @@ int ubifs_write_master(struct ubifs_info *c) | |||
379 | c->mst_offs = offs; | 379 | c->mst_offs = offs; |
380 | c->mst_node->highest_inum = cpu_to_le64(c->highest_inum); | 380 | c->mst_node->highest_inum = cpu_to_le64(c->highest_inum); |
381 | 381 | ||
382 | err = ubifs_write_node(c, c->mst_node, len, lnum, offs, UBI_SHORTTERM); | 382 | err = ubifs_write_node(c, c->mst_node, len, lnum, offs); |
383 | if (err) | 383 | if (err) |
384 | return err; | 384 | return err; |
385 | 385 | ||
@@ -390,7 +390,7 @@ int ubifs_write_master(struct ubifs_info *c) | |||
390 | if (err) | 390 | if (err) |
391 | return err; | 391 | return err; |
392 | } | 392 | } |
393 | err = ubifs_write_node(c, c->mst_node, len, lnum, offs, UBI_SHORTTERM); | 393 | err = ubifs_write_node(c, c->mst_node, len, lnum, offs); |
394 | 394 | ||
395 | return err; | 395 | return err; |
396 | } | 396 | } |
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index c542c73cfa3c..b02734db187c 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c | |||
@@ -52,11 +52,7 @@ | |||
52 | * than the maximum number of orphans allowed. | 52 | * than the maximum number of orphans allowed. |
53 | */ | 53 | */ |
54 | 54 | ||
55 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
56 | static int dbg_check_orphans(struct ubifs_info *c); | 55 | static int dbg_check_orphans(struct ubifs_info *c); |
57 | #else | ||
58 | #define dbg_check_orphans(c) 0 | ||
59 | #endif | ||
60 | 56 | ||
61 | /** | 57 | /** |
62 | * ubifs_add_orphan - add an orphan. | 58 | * ubifs_add_orphan - add an orphan. |
@@ -92,7 +88,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) | |||
92 | else if (inum > o->inum) | 88 | else if (inum > o->inum) |
93 | p = &(*p)->rb_right; | 89 | p = &(*p)->rb_right; |
94 | else { | 90 | else { |
95 | dbg_err("orphaned twice"); | 91 | ubifs_err("orphaned twice"); |
96 | spin_unlock(&c->orphan_lock); | 92 | spin_unlock(&c->orphan_lock); |
97 | kfree(orphan); | 93 | kfree(orphan); |
98 | return 0; | 94 | return 0; |
@@ -158,8 +154,8 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum) | |||
158 | } | 154 | } |
159 | } | 155 | } |
160 | spin_unlock(&c->orphan_lock); | 156 | spin_unlock(&c->orphan_lock); |
161 | dbg_err("missing orphan ino %lu", (unsigned long)inum); | 157 | ubifs_err("missing orphan ino %lu", (unsigned long)inum); |
162 | dbg_dump_stack(); | 158 | dump_stack(); |
163 | } | 159 | } |
164 | 160 | ||
165 | /** | 161 | /** |
@@ -248,8 +244,7 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic) | |||
248 | ubifs_assert(c->ohead_offs == 0); | 244 | ubifs_assert(c->ohead_offs == 0); |
249 | ubifs_prepare_node(c, c->orph_buf, len, 1); | 245 | ubifs_prepare_node(c, c->orph_buf, len, 1); |
250 | len = ALIGN(len, c->min_io_size); | 246 | len = ALIGN(len, c->min_io_size); |
251 | err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len, | 247 | err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len); |
252 | UBI_SHORTTERM); | ||
253 | } else { | 248 | } else { |
254 | if (c->ohead_offs == 0) { | 249 | if (c->ohead_offs == 0) { |
255 | /* Ensure LEB has been unmapped */ | 250 | /* Ensure LEB has been unmapped */ |
@@ -258,7 +253,7 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic) | |||
258 | return err; | 253 | return err; |
259 | } | 254 | } |
260 | err = ubifs_write_node(c, c->orph_buf, len, c->ohead_lnum, | 255 | err = ubifs_write_node(c, c->orph_buf, len, c->ohead_lnum, |
261 | c->ohead_offs, UBI_SHORTTERM); | 256 | c->ohead_offs); |
262 | } | 257 | } |
263 | return err; | 258 | return err; |
264 | } | 259 | } |
@@ -569,7 +564,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, | |||
569 | if (snod->type != UBIFS_ORPH_NODE) { | 564 | if (snod->type != UBIFS_ORPH_NODE) { |
570 | ubifs_err("invalid node type %d in orphan area at " | 565 | ubifs_err("invalid node type %d in orphan area at " |
571 | "%d:%d", snod->type, sleb->lnum, snod->offs); | 566 | "%d:%d", snod->type, sleb->lnum, snod->offs); |
572 | dbg_dump_node(c, snod->node); | 567 | ubifs_dump_node(c, snod->node); |
573 | return -EINVAL; | 568 | return -EINVAL; |
574 | } | 569 | } |
575 | 570 | ||
@@ -597,7 +592,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, | |||
597 | ubifs_err("out of order commit number %llu in " | 592 | ubifs_err("out of order commit number %llu in " |
598 | "orphan node at %d:%d", | 593 | "orphan node at %d:%d", |
599 | cmt_no, sleb->lnum, snod->offs); | 594 | cmt_no, sleb->lnum, snod->offs); |
600 | dbg_dump_node(c, snod->node); | 595 | ubifs_dump_node(c, snod->node); |
601 | return -EINVAL; | 596 | return -EINVAL; |
602 | } | 597 | } |
603 | dbg_rcvry("out of date LEB %d", sleb->lnum); | 598 | dbg_rcvry("out of date LEB %d", sleb->lnum); |
@@ -725,7 +720,9 @@ int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only) | |||
725 | return err; | 720 | return err; |
726 | } | 721 | } |
727 | 722 | ||
728 | #ifdef CONFIG_UBIFS_FS_DEBUG | 723 | /* |
724 | * Everything below is related to debugging. | ||
725 | */ | ||
729 | 726 | ||
730 | struct check_orphan { | 727 | struct check_orphan { |
731 | struct rb_node rb; | 728 | struct rb_node rb; |
@@ -968,5 +965,3 @@ out: | |||
968 | kfree(ci.node); | 965 | kfree(ci.node); |
969 | return err; | 966 | return err; |
970 | } | 967 | } |
971 | |||
972 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 2a935b317232..c30d976b4be8 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c | |||
@@ -213,10 +213,10 @@ static int write_rcvrd_mst_node(struct ubifs_info *c, | |||
213 | mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY); | 213 | mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY); |
214 | 214 | ||
215 | ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); | 215 | ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); |
216 | err = ubifs_leb_change(c, lnum, mst, sz, UBI_SHORTTERM); | 216 | err = ubifs_leb_change(c, lnum, mst, sz); |
217 | if (err) | 217 | if (err) |
218 | goto out; | 218 | goto out; |
219 | err = ubifs_leb_change(c, lnum + 1, mst, sz, UBI_SHORTTERM); | 219 | err = ubifs_leb_change(c, lnum + 1, mst, sz); |
220 | if (err) | 220 | if (err) |
221 | goto out; | 221 | goto out; |
222 | out: | 222 | out: |
@@ -362,12 +362,12 @@ out_err: | |||
362 | out_free: | 362 | out_free: |
363 | ubifs_err("failed to recover master node"); | 363 | ubifs_err("failed to recover master node"); |
364 | if (mst1) { | 364 | if (mst1) { |
365 | dbg_err("dumping first master node"); | 365 | ubifs_err("dumping first master node"); |
366 | dbg_dump_node(c, mst1); | 366 | ubifs_dump_node(c, mst1); |
367 | } | 367 | } |
368 | if (mst2) { | 368 | if (mst2) { |
369 | dbg_err("dumping second master node"); | 369 | ubifs_err("dumping second master node"); |
370 | dbg_dump_node(c, mst2); | 370 | ubifs_dump_node(c, mst2); |
371 | } | 371 | } |
372 | vfree(buf2); | 372 | vfree(buf2); |
373 | vfree(buf1); | 373 | vfree(buf1); |
@@ -555,8 +555,7 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, | |||
555 | ubifs_pad(c, buf, pad_len); | 555 | ubifs_pad(c, buf, pad_len); |
556 | } | 556 | } |
557 | } | 557 | } |
558 | err = ubifs_leb_change(c, lnum, sleb->buf, len, | 558 | err = ubifs_leb_change(c, lnum, sleb->buf, len); |
559 | UBI_UNKNOWN); | ||
560 | if (err) | 559 | if (err) |
561 | return err; | 560 | return err; |
562 | } | 561 | } |
@@ -683,7 +682,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, | |||
683 | ret, lnum, offs); | 682 | ret, lnum, offs); |
684 | break; | 683 | break; |
685 | } else { | 684 | } else { |
686 | dbg_err("unexpected return value %d", ret); | 685 | ubifs_err("unexpected return value %d", ret); |
687 | err = -EINVAL; | 686 | err = -EINVAL; |
688 | goto error; | 687 | goto error; |
689 | } | 688 | } |
@@ -789,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, | |||
789 | 788 | ||
790 | corrupted_rescan: | 789 | corrupted_rescan: |
791 | /* Re-scan the corrupted data with verbose messages */ | 790 | /* Re-scan the corrupted data with verbose messages */ |
792 | dbg_err("corruptio %d", ret); | 791 | ubifs_err("corruptio %d", ret); |
793 | ubifs_scan_a_node(c, buf, len, lnum, offs, 1); | 792 | ubifs_scan_a_node(c, buf, len, lnum, offs, 1); |
794 | corrupted: | 793 | corrupted: |
795 | ubifs_scanned_corruption(c, lnum, offs, buf); | 794 | ubifs_scanned_corruption(c, lnum, offs, buf); |
@@ -827,17 +826,17 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs, | |||
827 | goto out_free; | 826 | goto out_free; |
828 | ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0); | 827 | ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0); |
829 | if (ret != SCANNED_A_NODE) { | 828 | if (ret != SCANNED_A_NODE) { |
830 | dbg_err("Not a valid node"); | 829 | ubifs_err("Not a valid node"); |
831 | goto out_err; | 830 | goto out_err; |
832 | } | 831 | } |
833 | if (cs_node->ch.node_type != UBIFS_CS_NODE) { | 832 | if (cs_node->ch.node_type != UBIFS_CS_NODE) { |
834 | dbg_err("Node a CS node, type is %d", cs_node->ch.node_type); | 833 | ubifs_err("Node a CS node, type is %d", cs_node->ch.node_type); |
835 | goto out_err; | 834 | goto out_err; |
836 | } | 835 | } |
837 | if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) { | 836 | if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) { |
838 | dbg_err("CS node cmt_no %llu != current cmt_no %llu", | 837 | ubifs_err("CS node cmt_no %llu != current cmt_no %llu", |
839 | (unsigned long long)le64_to_cpu(cs_node->cmt_no), | 838 | (unsigned long long)le64_to_cpu(cs_node->cmt_no), |
840 | c->cmt_no); | 839 | c->cmt_no); |
841 | goto out_err; | 840 | goto out_err; |
842 | } | 841 | } |
843 | *cs_sqnum = le64_to_cpu(cs_node->ch.sqnum); | 842 | *cs_sqnum = le64_to_cpu(cs_node->ch.sqnum); |
@@ -941,7 +940,7 @@ static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf) | |||
941 | err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1); | 940 | err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1); |
942 | if (err) | 941 | if (err) |
943 | return err; | 942 | return err; |
944 | return ubifs_leb_change(c, lnum, sbuf, offs, UBI_UNKNOWN); | 943 | return ubifs_leb_change(c, lnum, sbuf, offs); |
945 | } | 944 | } |
946 | 945 | ||
947 | return 0; | 946 | return 0; |
@@ -1071,7 +1070,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c, | |||
1071 | } | 1070 | } |
1072 | 1071 | ||
1073 | /* Write back the LEB atomically */ | 1072 | /* Write back the LEB atomically */ |
1074 | err = ubifs_leb_change(c, lnum, sbuf, len, UBI_UNKNOWN); | 1073 | err = ubifs_leb_change(c, lnum, sbuf, len); |
1075 | if (err) | 1074 | if (err) |
1076 | return err; | 1075 | return err; |
1077 | 1076 | ||
@@ -1138,9 +1137,9 @@ static int grab_empty_leb(struct ubifs_info *c) | |||
1138 | */ | 1137 | */ |
1139 | lnum = ubifs_find_free_leb_for_idx(c); | 1138 | lnum = ubifs_find_free_leb_for_idx(c); |
1140 | if (lnum < 0) { | 1139 | if (lnum < 0) { |
1141 | dbg_err("could not find an empty LEB"); | 1140 | ubifs_err("could not find an empty LEB"); |
1142 | dbg_dump_lprops(c); | 1141 | ubifs_dump_lprops(c); |
1143 | dbg_dump_budg(c, &c->bi); | 1142 | ubifs_dump_budg(c, &c->bi); |
1144 | return lnum; | 1143 | return lnum; |
1145 | } | 1144 | } |
1146 | 1145 | ||
@@ -1218,7 +1217,7 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c) | |||
1218 | } | 1217 | } |
1219 | mutex_unlock(&wbuf->io_mutex); | 1218 | mutex_unlock(&wbuf->io_mutex); |
1220 | if (err < 0) { | 1219 | if (err < 0) { |
1221 | dbg_err("GC failed, error %d", err); | 1220 | ubifs_err("GC failed, error %d", err); |
1222 | if (err == -EAGAIN) | 1221 | if (err == -EAGAIN) |
1223 | err = -EINVAL; | 1222 | err = -EINVAL; |
1224 | return err; | 1223 | return err; |
@@ -1472,7 +1471,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e) | |||
1472 | len -= 1; | 1471 | len -= 1; |
1473 | len = ALIGN(len + 1, c->min_io_size); | 1472 | len = ALIGN(len + 1, c->min_io_size); |
1474 | /* Atomically write the fixed LEB back again */ | 1473 | /* Atomically write the fixed LEB back again */ |
1475 | err = ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN); | 1474 | err = ubifs_leb_change(c, lnum, c->sbuf, len); |
1476 | if (err) | 1475 | if (err) |
1477 | goto out; | 1476 | goto out; |
1478 | dbg_rcvry("inode %lu at %d:%d size %lld -> %lld", | 1477 | dbg_rcvry("inode %lu at %d:%d size %lld -> %lld", |
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index b007637f0406..3a2da7e476e5 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c | |||
@@ -154,8 +154,7 @@ static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b) | |||
154 | 154 | ||
155 | /* Make sure the journal head points to the latest bud */ | 155 | /* Make sure the journal head points to the latest bud */ |
156 | err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf, | 156 | err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf, |
157 | b->bud->lnum, c->leb_size - b->free, | 157 | b->bud->lnum, c->leb_size - b->free); |
158 | UBI_SHORTTERM); | ||
159 | 158 | ||
160 | out: | 159 | out: |
161 | ubifs_release_lprops(c); | 160 | ubifs_release_lprops(c); |
@@ -686,7 +685,7 @@ out: | |||
686 | 685 | ||
687 | out_dump: | 686 | out_dump: |
688 | ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs); | 687 | ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs); |
689 | dbg_dump_node(c, snod->node); | 688 | ubifs_dump_node(c, snod->node); |
690 | ubifs_scan_destroy(sleb); | 689 | ubifs_scan_destroy(sleb); |
691 | return -EINVAL; | 690 | return -EINVAL; |
692 | } | 691 | } |
@@ -861,16 +860,16 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) | |||
861 | * numbers. | 860 | * numbers. |
862 | */ | 861 | */ |
863 | if (snod->type != UBIFS_CS_NODE) { | 862 | if (snod->type != UBIFS_CS_NODE) { |
864 | dbg_err("first log node at LEB %d:%d is not CS node", | 863 | ubifs_err("first log node at LEB %d:%d is not CS node", |
865 | lnum, offs); | 864 | lnum, offs); |
866 | goto out_dump; | 865 | goto out_dump; |
867 | } | 866 | } |
868 | if (le64_to_cpu(node->cmt_no) != c->cmt_no) { | 867 | if (le64_to_cpu(node->cmt_no) != c->cmt_no) { |
869 | dbg_err("first CS node at LEB %d:%d has wrong " | 868 | ubifs_err("first CS node at LEB %d:%d has wrong " |
870 | "commit number %llu expected %llu", | 869 | "commit number %llu expected %llu", |
871 | lnum, offs, | 870 | lnum, offs, |
872 | (unsigned long long)le64_to_cpu(node->cmt_no), | 871 | (unsigned long long)le64_to_cpu(node->cmt_no), |
873 | c->cmt_no); | 872 | c->cmt_no); |
874 | goto out_dump; | 873 | goto out_dump; |
875 | } | 874 | } |
876 | 875 | ||
@@ -892,7 +891,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) | |||
892 | 891 | ||
893 | /* Make sure the first node sits at offset zero of the LEB */ | 892 | /* Make sure the first node sits at offset zero of the LEB */ |
894 | if (snod->offs != 0) { | 893 | if (snod->offs != 0) { |
895 | dbg_err("first node is not at zero offset"); | 894 | ubifs_err("first node is not at zero offset"); |
896 | goto out_dump; | 895 | goto out_dump; |
897 | } | 896 | } |
898 | 897 | ||
@@ -905,8 +904,8 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) | |||
905 | } | 904 | } |
906 | 905 | ||
907 | if (snod->sqnum < c->cs_sqnum) { | 906 | if (snod->sqnum < c->cs_sqnum) { |
908 | dbg_err("bad sqnum %llu, commit sqnum %llu", | 907 | ubifs_err("bad sqnum %llu, commit sqnum %llu", |
909 | snod->sqnum, c->cs_sqnum); | 908 | snod->sqnum, c->cs_sqnum); |
910 | goto out_dump; | 909 | goto out_dump; |
911 | } | 910 | } |
912 | 911 | ||
@@ -958,7 +957,7 @@ out: | |||
958 | out_dump: | 957 | out_dump: |
959 | ubifs_err("log error detected while replaying the log at LEB %d:%d", | 958 | ubifs_err("log error detected while replaying the log at LEB %d:%d", |
960 | lnum, offs + snod->offs); | 959 | lnum, offs + snod->offs); |
961 | dbg_dump_node(c, snod->node); | 960 | ubifs_dump_node(c, snod->node); |
962 | ubifs_scan_destroy(sleb); | 961 | ubifs_scan_destroy(sleb); |
963 | return -EINVAL; | 962 | return -EINVAL; |
964 | } | 963 | } |
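
The replay.c hunks above are part of a tree-wide rename that runs through this series: the dbg_-prefixed reporting helpers (dbg_err(), dbg_dump_node()) are replaced by their plain ubifs_-prefixed counterparts (ubifs_err(), ubifs_dump_node()). The message text is unchanged; only the helper name and the continuation-line indentation move. A minimal before/after pair, copied from the hunk above:

	/* before: debug-flavoured helper */
	dbg_err("first node is not at zero offset");

	/* after: plain error helper, same message */
	ubifs_err("first node is not at zero offset");
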
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 771f7fb6ce92..ef3d1ba6d992 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c | |||
@@ -130,7 +130,6 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
130 | * orphan node. | 130 | * orphan node. |
131 | */ | 131 | */ |
132 | orph_lebs = UBIFS_MIN_ORPH_LEBS; | 132 | orph_lebs = UBIFS_MIN_ORPH_LEBS; |
133 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
134 | if (c->leb_cnt - min_leb_cnt > 1) | 133 | if (c->leb_cnt - min_leb_cnt > 1) |
135 | /* | 134 | /* |
136 | * For debugging purposes it is better to have at least 2 | 135 | * For debugging purposes it is better to have at least 2 |
@@ -138,7 +137,6 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
138 | * consolidations and would be stressed more. | 137 | * consolidations and would be stressed more. |
139 | */ | 138 | */ |
140 | orph_lebs += 1; | 139 | orph_lebs += 1; |
141 | #endif | ||
142 | 140 | ||
143 | main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - log_lebs; | 141 | main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - log_lebs; |
144 | main_lebs -= orph_lebs; | 142 | main_lebs -= orph_lebs; |
@@ -196,7 +194,7 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
196 | sup->rp_size = cpu_to_le64(tmp64); | 194 | sup->rp_size = cpu_to_le64(tmp64); |
197 | sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION); | 195 | sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION); |
198 | 196 | ||
199 | err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0, UBI_LONGTERM); | 197 | err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0); |
200 | kfree(sup); | 198 | kfree(sup); |
201 | if (err) | 199 | if (err) |
202 | return err; | 200 | return err; |
@@ -252,14 +250,13 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
252 | 250 | ||
253 | mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ); | 251 | mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ); |
254 | 252 | ||
255 | err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0, | 253 | err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0); |
256 | UBI_UNKNOWN); | ||
257 | if (err) { | 254 | if (err) { |
258 | kfree(mst); | 255 | kfree(mst); |
259 | return err; | 256 | return err; |
260 | } | 257 | } |
261 | err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, 0, | 258 | err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, |
262 | UBI_UNKNOWN); | 259 | 0); |
263 | kfree(mst); | 260 | kfree(mst); |
264 | if (err) | 261 | if (err) |
265 | return err; | 262 | return err; |
@@ -282,8 +279,7 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
282 | key_write_idx(c, &key, &br->key); | 279 | key_write_idx(c, &key, &br->key); |
283 | br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB); | 280 | br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB); |
284 | br->len = cpu_to_le32(UBIFS_INO_NODE_SZ); | 281 | br->len = cpu_to_le32(UBIFS_INO_NODE_SZ); |
285 | err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0, | 282 | err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0); |
286 | UBI_UNKNOWN); | ||
287 | kfree(idx); | 283 | kfree(idx); |
288 | if (err) | 284 | if (err) |
289 | return err; | 285 | return err; |
@@ -315,8 +311,7 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
315 | ino->flags = cpu_to_le32(UBIFS_COMPR_FL); | 311 | ino->flags = cpu_to_le32(UBIFS_COMPR_FL); |
316 | 312 | ||
317 | err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ, | 313 | err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ, |
318 | main_first + DEFAULT_DATA_LEB, 0, | 314 | main_first + DEFAULT_DATA_LEB, 0); |
319 | UBI_UNKNOWN); | ||
320 | kfree(ino); | 315 | kfree(ino); |
321 | if (err) | 316 | if (err) |
322 | return err; | 317 | return err; |
@@ -335,8 +330,7 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
335 | return -ENOMEM; | 330 | return -ENOMEM; |
336 | 331 | ||
337 | cs->ch.node_type = UBIFS_CS_NODE; | 332 | cs->ch.node_type = UBIFS_CS_NODE; |
338 | err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, | 333 | err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0); |
339 | 0, UBI_UNKNOWN); | ||
340 | kfree(cs); | 334 | kfree(cs); |
341 | 335 | ||
342 | ubifs_msg("default file-system created"); | 336 | ubifs_msg("default file-system created"); |
@@ -475,7 +469,7 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) | |||
475 | 469 | ||
476 | failed: | 470 | failed: |
477 | ubifs_err("bad superblock, error %d", err); | 471 | ubifs_err("bad superblock, error %d", err); |
478 | dbg_dump_node(c, sup); | 472 | ubifs_dump_node(c, sup); |
479 | return -EINVAL; | 473 | return -EINVAL; |
480 | } | 474 | } |
481 | 475 | ||
@@ -518,7 +512,7 @@ int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup) | |||
518 | int len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size); | 512 | int len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size); |
519 | 513 | ||
520 | ubifs_prepare_node(c, sup, UBIFS_SB_NODE_SZ, 1); | 514 | ubifs_prepare_node(c, sup, UBIFS_SB_NODE_SZ, 1); |
521 | return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len, UBI_LONGTERM); | 515 | return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len); |
522 | } | 516 | } |
523 | 517 | ||
524 | /** | 518 | /** |
@@ -691,7 +685,7 @@ static int fixup_leb(struct ubifs_info *c, int lnum, int len) | |||
691 | if (err) | 685 | if (err) |
692 | return err; | 686 | return err; |
693 | 687 | ||
694 | return ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN); | 688 | return ubifs_leb_change(c, lnum, c->sbuf, len); |
695 | } | 689 | } |
696 | 690 | ||
697 | /** | 691 | /** |
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index 37383e8011b1..7c40e6025fd6 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c | |||
@@ -101,7 +101,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, | |||
101 | if (!quiet) { | 101 | if (!quiet) { |
102 | ubifs_err("bad pad node at LEB %d:%d", | 102 | ubifs_err("bad pad node at LEB %d:%d", |
103 | lnum, offs); | 103 | lnum, offs); |
104 | dbg_dump_node(c, pad); | 104 | ubifs_dump_node(c, pad); |
105 | } | 105 | } |
106 | return SCANNED_A_BAD_PAD_NODE; | 106 | return SCANNED_A_BAD_PAD_NODE; |
107 | } | 107 | } |
@@ -109,8 +109,8 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, | |||
109 | /* Make the node pads to 8-byte boundary */ | 109 | /* Make the node pads to 8-byte boundary */ |
110 | if ((node_len + pad_len) & 7) { | 110 | if ((node_len + pad_len) & 7) { |
111 | if (!quiet) | 111 | if (!quiet) |
112 | dbg_err("bad padding length %d - %d", | 112 | ubifs_err("bad padding length %d - %d", |
113 | offs, offs + node_len + pad_len); | 113 | offs, offs + node_len + pad_len); |
114 | return SCANNED_A_BAD_PAD_NODE; | 114 | return SCANNED_A_BAD_PAD_NODE; |
115 | } | 115 | } |
116 | 116 | ||
@@ -245,7 +245,7 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, | |||
245 | len = c->leb_size - offs; | 245 | len = c->leb_size - offs; |
246 | if (len > 8192) | 246 | if (len > 8192) |
247 | len = 8192; | 247 | len = 8192; |
248 | dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs); | 248 | ubifs_err("first %d bytes from LEB %d:%d", len, lnum, offs); |
249 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1); | 249 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1); |
250 | } | 250 | } |
251 | 251 | ||
@@ -300,16 +300,16 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, | |||
300 | 300 | ||
301 | switch (ret) { | 301 | switch (ret) { |
302 | case SCANNED_GARBAGE: | 302 | case SCANNED_GARBAGE: |
303 | dbg_err("garbage"); | 303 | ubifs_err("garbage"); |
304 | goto corrupted; | 304 | goto corrupted; |
305 | case SCANNED_A_NODE: | 305 | case SCANNED_A_NODE: |
306 | break; | 306 | break; |
307 | case SCANNED_A_CORRUPT_NODE: | 307 | case SCANNED_A_CORRUPT_NODE: |
308 | case SCANNED_A_BAD_PAD_NODE: | 308 | case SCANNED_A_BAD_PAD_NODE: |
309 | dbg_err("bad node"); | 309 | ubifs_err("bad node"); |
310 | goto corrupted; | 310 | goto corrupted; |
311 | default: | 311 | default: |
312 | dbg_err("unknown"); | 312 | ubifs_err("unknown"); |
313 | err = -EINVAL; | 313 | err = -EINVAL; |
314 | goto error; | 314 | goto error; |
315 | } | 315 | } |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 76e4e0566ad6..001acccac0d6 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -246,8 +246,8 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum) | |||
246 | 246 | ||
247 | out_invalid: | 247 | out_invalid: |
248 | ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err); | 248 | ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err); |
249 | dbg_dump_node(c, ino); | 249 | ubifs_dump_node(c, ino); |
250 | dbg_dump_inode(c, inode); | 250 | ubifs_dump_inode(c, inode); |
251 | err = -EINVAL; | 251 | err = -EINVAL; |
252 | out_ino: | 252 | out_ino: |
253 | kfree(ino); | 253 | kfree(ino); |
@@ -668,8 +668,8 @@ static int init_constants_sb(struct ubifs_info *c) | |||
668 | tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt; | 668 | tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt; |
669 | tmp = ALIGN(tmp, c->min_io_size); | 669 | tmp = ALIGN(tmp, c->min_io_size); |
670 | if (tmp > c->leb_size) { | 670 | if (tmp > c->leb_size) { |
671 | dbg_err("too small LEB size %d, at least %d needed", | 671 | ubifs_err("too small LEB size %d, at least %d needed", |
672 | c->leb_size, tmp); | 672 | c->leb_size, tmp); |
673 | return -EINVAL; | 673 | return -EINVAL; |
674 | } | 674 | } |
675 | 675 | ||
@@ -683,8 +683,8 @@ static int init_constants_sb(struct ubifs_info *c) | |||
683 | tmp /= c->leb_size; | 683 | tmp /= c->leb_size; |
684 | tmp += 1; | 684 | tmp += 1; |
685 | if (c->log_lebs < tmp) { | 685 | if (c->log_lebs < tmp) { |
686 | dbg_err("too small log %d LEBs, required min. %d LEBs", | 686 | ubifs_err("too small log %d LEBs, required min. %d LEBs", |
687 | c->log_lebs, tmp); | 687 | c->log_lebs, tmp); |
688 | return -EINVAL; | 688 | return -EINVAL; |
689 | } | 689 | } |
690 | 690 | ||
@@ -813,13 +813,10 @@ static int alloc_wbufs(struct ubifs_info *c) | |||
813 | c->jheads[i].grouped = 1; | 813 | c->jheads[i].grouped = 1; |
814 | } | 814 | } |
815 | 815 | ||
816 | c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM; | ||
817 | /* | 816 | /* |
818 | * Garbage Collector head likely contains long-term data and | 817 | * Garbage Collector head does not need to be synchronized by timer. |
819 | * does not need to be synchronized by timer. Also GC head nodes are | 818 | * Also GC head nodes are not grouped. |
820 | * not grouped. | ||
821 | */ | 819 | */ |
822 | c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; | ||
823 | c->jheads[GCHD].wbuf.no_timer = 1; | 820 | c->jheads[GCHD].wbuf.no_timer = 1; |
824 | c->jheads[GCHD].grouped = 0; | 821 | c->jheads[GCHD].grouped = 0; |
825 | 822 | ||
@@ -863,7 +860,7 @@ static void free_orphans(struct ubifs_info *c) | |||
863 | orph = list_entry(c->orph_list.next, struct ubifs_orphan, list); | 860 | orph = list_entry(c->orph_list.next, struct ubifs_orphan, list); |
864 | list_del(&orph->list); | 861 | list_del(&orph->list); |
865 | kfree(orph); | 862 | kfree(orph); |
866 | dbg_err("orphan list not empty at unmount"); | 863 | ubifs_err("orphan list not empty at unmount"); |
867 | } | 864 | } |
868 | 865 | ||
869 | vfree(c->orph_buf); | 866 | vfree(c->orph_buf); |
@@ -1147,8 +1144,8 @@ static int check_free_space(struct ubifs_info *c) | |||
1147 | ubifs_assert(c->dark_wm > 0); | 1144 | ubifs_assert(c->dark_wm > 0); |
1148 | if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { | 1145 | if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { |
1149 | ubifs_err("insufficient free space to mount in R/W mode"); | 1146 | ubifs_err("insufficient free space to mount in R/W mode"); |
1150 | dbg_dump_budg(c, &c->bi); | 1147 | ubifs_dump_budg(c, &c->bi); |
1151 | dbg_dump_lprops(c); | 1148 | ubifs_dump_lprops(c); |
1152 | return -ENOSPC; | 1149 | return -ENOSPC; |
1153 | } | 1150 | } |
1154 | return 0; | 1151 | return 0; |
@@ -1301,7 +1298,7 @@ static int mount_ubifs(struct ubifs_info *c) | |||
1301 | if (!c->ro_mount && c->space_fixup) { | 1298 | if (!c->ro_mount && c->space_fixup) { |
1302 | err = ubifs_fixup_free_space(c); | 1299 | err = ubifs_fixup_free_space(c); |
1303 | if (err) | 1300 | if (err) |
1304 | goto out_master; | 1301 | goto out_lpt; |
1305 | } | 1302 | } |
1306 | 1303 | ||
1307 | if (!c->ro_mount) { | 1304 | if (!c->ro_mount) { |
@@ -2126,8 +2123,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, | |||
2126 | */ | 2123 | */ |
2127 | ubi = open_ubi(name, UBI_READONLY); | 2124 | ubi = open_ubi(name, UBI_READONLY); |
2128 | if (IS_ERR(ubi)) { | 2125 | if (IS_ERR(ubi)) { |
2129 | dbg_err("cannot open \"%s\", error %d", | 2126 | ubifs_err("cannot open \"%s\", error %d", |
2130 | name, (int)PTR_ERR(ubi)); | 2127 | name, (int)PTR_ERR(ubi)); |
2131 | return ERR_CAST(ubi); | 2128 | return ERR_CAST(ubi); |
2132 | } | 2129 | } |
2133 | 2130 | ||
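
One hunk in super.c above is a behavioural fix rather than a rename: when ubifs_fixup_free_space() fails during mount, the error path now jumps to out_lpt instead of out_master, presumably so that the state set up after the LPT was initialised is torn down as well. The sketch below illustrates the goto-unwind idiom that change relies on; all names in it (setup_ctx, init_lpt, and so on) are invented for the example and it is not the real mount_ubifs() ladder.

	/* Standalone illustration of the goto-unwind idiom; invented names only. */
	#include <stdio.h>

	struct ctx { int master_up, lpt_up; };

	static int init_master(struct ctx *c)      { c->master_up = 1; return 0; }
	static int init_lpt(struct ctx *c)         { c->lpt_up = 1;    return 0; }
	static int fixup_free_space(struct ctx *c) { (void)c; return -1; /* simulate failure */ }
	static void free_lpt(struct ctx *c)        { c->lpt_up = 0; }
	static void free_master(struct ctx *c)     { c->master_up = 0; }

	static int setup_ctx(struct ctx *c)
	{
		int err;

		err = init_master(c);
		if (err)
			return err;
		err = init_lpt(c);
		if (err)
			goto out_master;
		err = fixup_free_space(c);
		if (err)
			goto out_lpt;	/* must undo the LPT step too, not just the master */
		return 0;

	out_lpt:
		free_lpt(c);
	out_master:
		free_master(c);
		return err;
	}

	int main(void)
	{
		struct ctx c = { 0, 0 };
		int err = setup_ctx(&c);

		printf("err=%d master_up=%d lpt_up=%d\n", err, c.master_up, c.lpt_up);
		return 0;
	}
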
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 16ad84d8402f..349f31a30f40 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
@@ -339,8 +339,8 @@ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr, | |||
339 | 339 | ||
340 | err = ubifs_validate_entry(c, dent); | 340 | err = ubifs_validate_entry(c, dent); |
341 | if (err) { | 341 | if (err) { |
342 | dbg_dump_stack(); | 342 | dump_stack(); |
343 | dbg_dump_node(c, dent); | 343 | ubifs_dump_node(c, dent); |
344 | return err; | 344 | return err; |
345 | } | 345 | } |
346 | 346 | ||
@@ -372,8 +372,8 @@ static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr, | |||
372 | 372 | ||
373 | err = ubifs_validate_entry(c, node); | 373 | err = ubifs_validate_entry(c, node); |
374 | if (err) { | 374 | if (err) { |
375 | dbg_dump_stack(); | 375 | dump_stack(); |
376 | dbg_dump_node(c, node); | 376 | ubifs_dump_node(c, node); |
377 | return err; | 377 | return err; |
378 | } | 378 | } |
379 | 379 | ||
@@ -1733,8 +1733,8 @@ out_err: | |||
1733 | err = -EINVAL; | 1733 | err = -EINVAL; |
1734 | out: | 1734 | out: |
1735 | ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs); | 1735 | ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs); |
1736 | dbg_dump_node(c, buf); | 1736 | ubifs_dump_node(c, buf); |
1737 | dbg_dump_stack(); | 1737 | dump_stack(); |
1738 | return err; | 1738 | return err; |
1739 | } | 1739 | } |
1740 | 1740 | ||
@@ -1775,7 +1775,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu) | |||
1775 | if (err && err != -EBADMSG) { | 1775 | if (err && err != -EBADMSG) { |
1776 | ubifs_err("failed to read from LEB %d:%d, error %d", | 1776 | ubifs_err("failed to read from LEB %d:%d, error %d", |
1777 | lnum, offs, err); | 1777 | lnum, offs, err); |
1778 | dbg_dump_stack(); | 1778 | dump_stack(); |
1779 | dbg_tnck(&bu->key, "key "); | 1779 | dbg_tnck(&bu->key, "key "); |
1780 | return err; | 1780 | return err; |
1781 | } | 1781 | } |
@@ -2361,7 +2361,7 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, | |||
2361 | * by passing 'ubifs_tnc_remove_nm()' the same key but | 2361 | * by passing 'ubifs_tnc_remove_nm()' the same key but |
2362 | * an unmatchable name. | 2362 | * an unmatchable name. |
2363 | */ | 2363 | */ |
2364 | struct qstr noname = { .len = 0, .name = "" }; | 2364 | struct qstr noname = { .name = "" }; |
2365 | 2365 | ||
2366 | err = dbg_check_tnc(c, 0); | 2366 | err = dbg_check_tnc(c, 0); |
2367 | mutex_unlock(&c->tnc_mutex); | 2367 | mutex_unlock(&c->tnc_mutex); |
@@ -2403,7 +2403,7 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n) | |||
2403 | 2403 | ||
2404 | err = ubifs_add_dirt(c, zbr->lnum, zbr->len); | 2404 | err = ubifs_add_dirt(c, zbr->lnum, zbr->len); |
2405 | if (err) { | 2405 | if (err) { |
2406 | dbg_dump_znode(c, znode); | 2406 | ubifs_dump_znode(c, znode); |
2407 | return err; | 2407 | return err; |
2408 | } | 2408 | } |
2409 | 2409 | ||
@@ -2649,7 +2649,7 @@ int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key, | |||
2649 | err = ubifs_add_dirt(c, znode->zbranch[i].lnum, | 2649 | err = ubifs_add_dirt(c, znode->zbranch[i].lnum, |
2650 | znode->zbranch[i].len); | 2650 | znode->zbranch[i].len); |
2651 | if (err) { | 2651 | if (err) { |
2652 | dbg_dump_znode(c, znode); | 2652 | ubifs_dump_znode(c, znode); |
2653 | goto out_unlock; | 2653 | goto out_unlock; |
2654 | } | 2654 | } |
2655 | dbg_tnck(key, "removing key "); | 2655 | dbg_tnck(key, "removing key "); |
@@ -3275,8 +3275,6 @@ out_unlock: | |||
3275 | return err; | 3275 | return err; |
3276 | } | 3276 | } |
3277 | 3277 | ||
3278 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
3279 | |||
3280 | /** | 3278 | /** |
3281 | * dbg_check_inode_size - check if inode size is correct. | 3279 | * dbg_check_inode_size - check if inode size is correct. |
3282 | * @c: UBIFS file-system description object | 3280 | * @c: UBIFS file-system description object |
@@ -3335,13 +3333,11 @@ out_dump: | |||
3335 | (unsigned long)inode->i_ino, size, | 3333 | (unsigned long)inode->i_ino, size, |
3336 | ((loff_t)block) << UBIFS_BLOCK_SHIFT); | 3334 | ((loff_t)block) << UBIFS_BLOCK_SHIFT); |
3337 | mutex_unlock(&c->tnc_mutex); | 3335 | mutex_unlock(&c->tnc_mutex); |
3338 | dbg_dump_inode(c, inode); | 3336 | ubifs_dump_inode(c, inode); |
3339 | dbg_dump_stack(); | 3337 | dump_stack(); |
3340 | return -EINVAL; | 3338 | return -EINVAL; |
3341 | 3339 | ||
3342 | out_unlock: | 3340 | out_unlock: |
3343 | mutex_unlock(&c->tnc_mutex); | 3341 | mutex_unlock(&c->tnc_mutex); |
3344 | return err; | 3342 | return err; |
3345 | } | 3343 | } |
3346 | |||
3347 | #endif /* CONFIG_UBIFS_FS_DEBUG */ | ||
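
Two smaller points from the tnc.c hunks: dbg_check_inode_size() is no longer fenced by CONFIG_UBIFS_FS_DEBUG, and the temporary qstr in ubifs_tnc_add_nm() drops its explicit .len = 0. The latter does not change the value: members omitted from a designated initializer are zero-initialized. The self-contained snippet below demonstrates that rule with a minimal stand-in struct (name_len is not a kernel type, it is only here to show the initializer behaviour):

	#include <stdio.h>

	/* Minimal stand-in for struct qstr, just to show the initializer rule. */
	struct name_len { const char *name; unsigned int len; };

	int main(void)
	{
		struct name_len a = { .name = "" };             /* new form in the hunk */
		struct name_len b = { .len = 0, .name = "" };   /* old form */

		/* Members not named in a designated initializer are zero-initialized,
		 * so a.len and b.len are both 0. */
		printf("%u %u\n", a.len, b.len);
		return 0;
	}
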
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 4c15f07a8bb2..523bbad69c0c 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c | |||
@@ -54,18 +54,16 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, | |||
54 | br->len = cpu_to_le32(zbr->len); | 54 | br->len = cpu_to_le32(zbr->len); |
55 | if (!zbr->lnum || !zbr->len) { | 55 | if (!zbr->lnum || !zbr->len) { |
56 | ubifs_err("bad ref in znode"); | 56 | ubifs_err("bad ref in znode"); |
57 | dbg_dump_znode(c, znode); | 57 | ubifs_dump_znode(c, znode); |
58 | if (zbr->znode) | 58 | if (zbr->znode) |
59 | dbg_dump_znode(c, zbr->znode); | 59 | ubifs_dump_znode(c, zbr->znode); |
60 | } | 60 | } |
61 | } | 61 | } |
62 | ubifs_prepare_node(c, idx, len, 0); | 62 | ubifs_prepare_node(c, idx, len, 0); |
63 | 63 | ||
64 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
65 | znode->lnum = lnum; | 64 | znode->lnum = lnum; |
66 | znode->offs = offs; | 65 | znode->offs = offs; |
67 | znode->len = len; | 66 | znode->len = len; |
68 | #endif | ||
69 | 67 | ||
70 | err = insert_old_idx_znode(c, znode); | 68 | err = insert_old_idx_znode(c, znode); |
71 | 69 | ||
@@ -322,8 +320,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p) | |||
322 | 0, 0, 0); | 320 | 0, 0, 0); |
323 | if (err) | 321 | if (err) |
324 | return err; | 322 | return err; |
325 | err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len, | 323 | err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len); |
326 | UBI_SHORTTERM); | ||
327 | if (err) | 324 | if (err) |
328 | return err; | 325 | return err; |
329 | dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written); | 326 | dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written); |
@@ -388,8 +385,8 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) | |||
388 | * option which forces in-the-gaps is enabled. | 385 | * option which forces in-the-gaps is enabled. |
389 | */ | 386 | */ |
390 | ubifs_warn("out of space"); | 387 | ubifs_warn("out of space"); |
391 | dbg_dump_budg(c, &c->bi); | 388 | ubifs_dump_budg(c, &c->bi); |
392 | dbg_dump_lprops(c); | 389 | ubifs_dump_lprops(c); |
393 | } | 390 | } |
394 | /* Try to commit anyway */ | 391 | /* Try to commit anyway */ |
395 | err = 0; | 392 | err = 0; |
@@ -456,11 +453,9 @@ static int layout_in_empty_space(struct ubifs_info *c) | |||
456 | 453 | ||
457 | offs = buf_offs + used; | 454 | offs = buf_offs + used; |
458 | 455 | ||
459 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
460 | znode->lnum = lnum; | 456 | znode->lnum = lnum; |
461 | znode->offs = offs; | 457 | znode->offs = offs; |
462 | znode->len = len; | 458 | znode->len = len; |
463 | #endif | ||
464 | 459 | ||
465 | /* Update the parent */ | 460 | /* Update the parent */ |
466 | zp = znode->parent; | 461 | zp = znode->parent; |
@@ -536,10 +531,8 @@ static int layout_in_empty_space(struct ubifs_info *c) | |||
536 | break; | 531 | break; |
537 | } | 532 | } |
538 | 533 | ||
539 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
540 | c->dbg->new_ihead_lnum = lnum; | 534 | c->dbg->new_ihead_lnum = lnum; |
541 | c->dbg->new_ihead_offs = buf_offs; | 535 | c->dbg->new_ihead_offs = buf_offs; |
542 | #endif | ||
543 | 536 | ||
544 | return 0; | 537 | return 0; |
545 | } | 538 | } |
@@ -864,9 +857,9 @@ static int write_index(struct ubifs_info *c) | |||
864 | br->len = cpu_to_le32(zbr->len); | 857 | br->len = cpu_to_le32(zbr->len); |
865 | if (!zbr->lnum || !zbr->len) { | 858 | if (!zbr->lnum || !zbr->len) { |
866 | ubifs_err("bad ref in znode"); | 859 | ubifs_err("bad ref in znode"); |
867 | dbg_dump_znode(c, znode); | 860 | ubifs_dump_znode(c, znode); |
868 | if (zbr->znode) | 861 | if (zbr->znode) |
869 | dbg_dump_znode(c, zbr->znode); | 862 | ubifs_dump_znode(c, zbr->znode); |
870 | } | 863 | } |
871 | } | 864 | } |
872 | len = ubifs_idx_node_sz(c, znode->child_cnt); | 865 | len = ubifs_idx_node_sz(c, znode->child_cnt); |
@@ -881,13 +874,11 @@ static int write_index(struct ubifs_info *c) | |||
881 | } | 874 | } |
882 | offs = buf_offs + used; | 875 | offs = buf_offs + used; |
883 | 876 | ||
884 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
885 | if (lnum != znode->lnum || offs != znode->offs || | 877 | if (lnum != znode->lnum || offs != znode->offs || |
886 | len != znode->len) { | 878 | len != znode->len) { |
887 | ubifs_err("inconsistent znode posn"); | 879 | ubifs_err("inconsistent znode posn"); |
888 | return -EINVAL; | 880 | return -EINVAL; |
889 | } | 881 | } |
890 | #endif | ||
891 | 882 | ||
892 | /* Grab some stuff from znode while we still can */ | 883 | /* Grab some stuff from znode while we still can */ |
893 | cnext = znode->cnext; | 884 | cnext = znode->cnext; |
@@ -959,8 +950,7 @@ static int write_index(struct ubifs_info *c) | |||
959 | } | 950 | } |
960 | 951 | ||
961 | /* The buffer is full or there are no more znodes to do */ | 952 | /* The buffer is full or there are no more znodes to do */ |
962 | err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen, | 953 | err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen); |
963 | UBI_SHORTTERM); | ||
964 | if (err) | 954 | if (err) |
965 | return err; | 955 | return err; |
966 | buf_offs += blen; | 956 | buf_offs += blen; |
@@ -982,13 +972,11 @@ static int write_index(struct ubifs_info *c) | |||
982 | break; | 972 | break; |
983 | } | 973 | } |
984 | 974 | ||
985 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
986 | if (lnum != c->dbg->new_ihead_lnum || | 975 | if (lnum != c->dbg->new_ihead_lnum || |
987 | buf_offs != c->dbg->new_ihead_offs) { | 976 | buf_offs != c->dbg->new_ihead_offs) { |
988 | ubifs_err("inconsistent ihead"); | 977 | ubifs_err("inconsistent ihead"); |
989 | return -EINVAL; | 978 | return -EINVAL; |
990 | } | 979 | } |
991 | #endif | ||
992 | 980 | ||
993 | c->ihead_lnum = lnum; | 981 | c->ihead_lnum = lnum; |
994 | c->ihead_offs = buf_offs; | 982 | c->ihead_offs = buf_offs; |
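
With the CONFIG_UBIFS_FS_DEBUG guards removed, the znode position bookkeeping in tnc_commit.c (znode->lnum/offs/len and c->dbg->new_ihead_*) is compiled unconditionally, and so is the consistency check that uses it. The check itself is unchanged; reproduced here outside the diff markup for readability:

	/* Now built unconditionally, not only in debug builds (from the hunk above): */
	if (lnum != znode->lnum || offs != znode->offs || len != znode->len) {
		ubifs_err("inconsistent znode posn");
		return -EINVAL;
	}
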
diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c index dc28fe6ec07a..d38ac7f9654b 100644 --- a/fs/ubifs/tnc_misc.c +++ b/fs/ubifs/tnc_misc.c | |||
@@ -293,10 +293,10 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, | |||
293 | lnum, offs, znode->level, znode->child_cnt); | 293 | lnum, offs, znode->level, znode->child_cnt); |
294 | 294 | ||
295 | if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) { | 295 | if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) { |
296 | dbg_err("current fanout %d, branch count %d", | 296 | ubifs_err("current fanout %d, branch count %d", |
297 | c->fanout, znode->child_cnt); | 297 | c->fanout, znode->child_cnt); |
298 | dbg_err("max levels %d, znode level %d", | 298 | ubifs_err("max levels %d, znode level %d", |
299 | UBIFS_MAX_LEVELS, znode->level); | 299 | UBIFS_MAX_LEVELS, znode->level); |
300 | err = 1; | 300 | err = 1; |
301 | goto out_dump; | 301 | goto out_dump; |
302 | } | 302 | } |
@@ -316,7 +316,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, | |||
316 | if (zbr->lnum < c->main_first || | 316 | if (zbr->lnum < c->main_first || |
317 | zbr->lnum >= c->leb_cnt || zbr->offs < 0 || | 317 | zbr->lnum >= c->leb_cnt || zbr->offs < 0 || |
318 | zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) { | 318 | zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) { |
319 | dbg_err("bad branch %d", i); | 319 | ubifs_err("bad branch %d", i); |
320 | err = 2; | 320 | err = 2; |
321 | goto out_dump; | 321 | goto out_dump; |
322 | } | 322 | } |
@@ -340,19 +340,19 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, | |||
340 | type = key_type(c, &zbr->key); | 340 | type = key_type(c, &zbr->key); |
341 | if (c->ranges[type].max_len == 0) { | 341 | if (c->ranges[type].max_len == 0) { |
342 | if (zbr->len != c->ranges[type].len) { | 342 | if (zbr->len != c->ranges[type].len) { |
343 | dbg_err("bad target node (type %d) length (%d)", | 343 | ubifs_err("bad target node (type %d) length (%d)", |
344 | type, zbr->len); | 344 | type, zbr->len); |
345 | dbg_err("have to be %d", c->ranges[type].len); | 345 | ubifs_err("have to be %d", c->ranges[type].len); |
346 | err = 4; | 346 | err = 4; |
347 | goto out_dump; | 347 | goto out_dump; |
348 | } | 348 | } |
349 | } else if (zbr->len < c->ranges[type].min_len || | 349 | } else if (zbr->len < c->ranges[type].min_len || |
350 | zbr->len > c->ranges[type].max_len) { | 350 | zbr->len > c->ranges[type].max_len) { |
351 | dbg_err("bad target node (type %d) length (%d)", | 351 | ubifs_err("bad target node (type %d) length (%d)", |
352 | type, zbr->len); | 352 | type, zbr->len); |
353 | dbg_err("have to be in range of %d-%d", | 353 | ubifs_err("have to be in range of %d-%d", |
354 | c->ranges[type].min_len, | 354 | c->ranges[type].min_len, |
355 | c->ranges[type].max_len); | 355 | c->ranges[type].max_len); |
356 | err = 5; | 356 | err = 5; |
357 | goto out_dump; | 357 | goto out_dump; |
358 | } | 358 | } |
@@ -370,13 +370,13 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, | |||
370 | 370 | ||
371 | cmp = keys_cmp(c, key1, key2); | 371 | cmp = keys_cmp(c, key1, key2); |
372 | if (cmp > 0) { | 372 | if (cmp > 0) { |
373 | dbg_err("bad key order (keys %d and %d)", i, i + 1); | 373 | ubifs_err("bad key order (keys %d and %d)", i, i + 1); |
374 | err = 6; | 374 | err = 6; |
375 | goto out_dump; | 375 | goto out_dump; |
376 | } else if (cmp == 0 && !is_hash_key(c, key1)) { | 376 | } else if (cmp == 0 && !is_hash_key(c, key1)) { |
377 | /* These can only be keys with colliding hash */ | 377 | /* These can only be keys with colliding hash */ |
378 | dbg_err("keys %d and %d are not hashed but equivalent", | 378 | ubifs_err("keys %d and %d are not hashed but equivalent", |
379 | i, i + 1); | 379 | i, i + 1); |
380 | err = 7; | 380 | err = 7; |
381 | goto out_dump; | 381 | goto out_dump; |
382 | } | 382 | } |
@@ -387,7 +387,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, | |||
387 | 387 | ||
388 | out_dump: | 388 | out_dump: |
389 | ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err); | 389 | ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err); |
390 | dbg_dump_node(c, idx); | 390 | ubifs_dump_node(c, idx); |
391 | kfree(idx); | 391 | kfree(idx); |
392 | return -EINVAL; | 392 | return -EINVAL; |
393 | } | 393 | } |
@@ -486,7 +486,7 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, | |||
486 | zbr->lnum, zbr->offs); | 486 | zbr->lnum, zbr->offs); |
487 | dbg_tnck(key, "looked for key "); | 487 | dbg_tnck(key, "looked for key "); |
488 | dbg_tnck(&key1, "but found node's key "); | 488 | dbg_tnck(&key1, "but found node's key "); |
489 | dbg_dump_node(c, node); | 489 | ubifs_dump_node(c, node); |
490 | return -EINVAL; | 490 | return -EINVAL; |
491 | } | 491 | } |
492 | 492 | ||
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 93d59aceaaef..1e5a08623d11 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
@@ -650,8 +650,6 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c, | |||
650 | * @avail: number of bytes available in the write-buffer | 650 | * @avail: number of bytes available in the write-buffer |
651 | * @used: number of used bytes in the write-buffer | 651 | * @used: number of used bytes in the write-buffer |
652 | * @size: write-buffer size (in [@c->min_io_size, @c->max_write_size] range) | 652 | * @size: write-buffer size (in [@c->min_io_size, @c->max_write_size] range) |
653 | * @dtype: type of data stored in this LEB (%UBI_LONGTERM, %UBI_SHORTTERM, | ||
654 | * %UBI_UNKNOWN) | ||
655 | * @jhead: journal head the mutex belongs to (note, needed only to shut lockdep | 653 | * @jhead: journal head the mutex belongs to (note, needed only to shut lockdep |
656 | * up by 'mutex_lock_nested()). | 654 | * up by 'mutex_lock_nested()). |
657 | * @sync_callback: write-buffer synchronization callback | 655 | * @sync_callback: write-buffer synchronization callback |
@@ -685,7 +683,6 @@ struct ubifs_wbuf { | |||
685 | int avail; | 683 | int avail; |
686 | int used; | 684 | int used; |
687 | int size; | 685 | int size; |
688 | int dtype; | ||
689 | int jhead; | 686 | int jhead; |
690 | int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad); | 687 | int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad); |
691 | struct mutex io_mutex; | 688 | struct mutex io_mutex; |
@@ -762,6 +759,9 @@ struct ubifs_zbranch { | |||
762 | * @offs: offset of the corresponding indexing node | 759 | * @offs: offset of the corresponding indexing node |
763 | * @len: length of the corresponding indexing node | 760 | * @len: length of the corresponding indexing node |
764 | * @zbranch: array of znode branches (@c->fanout elements) | 761 | * @zbranch: array of znode branches (@c->fanout elements) |
762 | * | ||
763 | * Note! The @lnum, @offs, and @len fields are not really needed - we have them | ||
764 | * only for internal consistency check. They could be removed to save some RAM. | ||
765 | */ | 765 | */ |
766 | struct ubifs_znode { | 766 | struct ubifs_znode { |
767 | struct ubifs_znode *parent; | 767 | struct ubifs_znode *parent; |
@@ -772,9 +772,9 @@ struct ubifs_znode { | |||
772 | int child_cnt; | 772 | int child_cnt; |
773 | int iip; | 773 | int iip; |
774 | int alt; | 774 | int alt; |
775 | #ifdef CONFIG_UBIFS_FS_DEBUG | 775 | int lnum; |
776 | int lnum, offs, len; | 776 | int offs; |
777 | #endif | 777 | int len; |
778 | struct ubifs_zbranch zbranch[]; | 778 | struct ubifs_zbranch zbranch[]; |
779 | }; | 779 | }; |
780 | 780 | ||
@@ -1444,9 +1444,7 @@ struct ubifs_info { | |||
1444 | struct rb_root size_tree; | 1444 | struct rb_root size_tree; |
1445 | struct ubifs_mount_opts mount_opts; | 1445 | struct ubifs_mount_opts mount_opts; |
1446 | 1446 | ||
1447 | #ifdef CONFIG_UBIFS_FS_DEBUG | ||
1448 | struct ubifs_debug_info *dbg; | 1447 | struct ubifs_debug_info *dbg; |
1449 | #endif | ||
1450 | }; | 1448 | }; |
1451 | 1449 | ||
1452 | extern struct list_head ubifs_infos; | 1450 | extern struct list_head ubifs_infos; |
@@ -1468,22 +1466,20 @@ void ubifs_ro_mode(struct ubifs_info *c, int err); | |||
1468 | int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, | 1466 | int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, |
1469 | int len, int even_ebadmsg); | 1467 | int len, int even_ebadmsg); |
1470 | int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, | 1468 | int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, |
1471 | int len, int dtype); | 1469 | int len); |
1472 | int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, | 1470 | int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len); |
1473 | int dtype); | ||
1474 | int ubifs_leb_unmap(struct ubifs_info *c, int lnum); | 1471 | int ubifs_leb_unmap(struct ubifs_info *c, int lnum); |
1475 | int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype); | 1472 | int ubifs_leb_map(struct ubifs_info *c, int lnum); |
1476 | int ubifs_is_mapped(const struct ubifs_info *c, int lnum); | 1473 | int ubifs_is_mapped(const struct ubifs_info *c, int lnum); |
1477 | int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len); | 1474 | int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len); |
1478 | int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, | 1475 | int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs); |
1479 | int dtype); | ||
1480 | int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf); | 1476 | int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf); |
1481 | int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, | 1477 | int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, |
1482 | int lnum, int offs); | 1478 | int lnum, int offs); |
1483 | int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, | 1479 | int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, |
1484 | int lnum, int offs); | 1480 | int lnum, int offs); |
1485 | int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, | 1481 | int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, |
1486 | int offs, int dtype); | 1482 | int offs); |
1487 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, | 1483 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, |
1488 | int offs, int quiet, int must_chk_crc); | 1484 | int offs, int quiet, int must_chk_crc); |
1489 | void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); | 1485 | void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); |
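
The ubifs.h hunks spell out the interface change behind most of the mechanical edits in this series: the dtype argument (UBI_LONGTERM, UBI_SHORTTERM, UBI_UNKNOWN) disappears from the LEB I/O and write-buffer helpers, along with the dtype field of struct ubifs_wbuf. At a call site the change looks like this; both lines are taken from the write_index() hunk earlier in this diff:

	/* old: the caller had to pick a UBI data-type hint */
	err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen, UBI_SHORTTERM);

	/* new: the hint is gone from the whole API */
	err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
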
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 85b272268754..0f7139bdb2c2 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c | |||
@@ -298,7 +298,7 @@ int ubifs_setxattr(struct dentry *dentry, const char *name, | |||
298 | { | 298 | { |
299 | struct inode *inode, *host = dentry->d_inode; | 299 | struct inode *inode, *host = dentry->d_inode; |
300 | struct ubifs_info *c = host->i_sb->s_fs_info; | 300 | struct ubifs_info *c = host->i_sb->s_fs_info; |
301 | struct qstr nm = { .name = name, .len = strlen(name) }; | 301 | struct qstr nm = QSTR_INIT(name, strlen(name)); |
302 | struct ubifs_dent_node *xent; | 302 | struct ubifs_dent_node *xent; |
303 | union ubifs_key key; | 303 | union ubifs_key key; |
304 | int err, type; | 304 | int err, type; |
@@ -361,7 +361,7 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, | |||
361 | { | 361 | { |
362 | struct inode *inode, *host = dentry->d_inode; | 362 | struct inode *inode, *host = dentry->d_inode; |
363 | struct ubifs_info *c = host->i_sb->s_fs_info; | 363 | struct ubifs_info *c = host->i_sb->s_fs_info; |
364 | struct qstr nm = { .name = name, .len = strlen(name) }; | 364 | struct qstr nm = QSTR_INIT(name, strlen(name)); |
365 | struct ubifs_inode *ui; | 365 | struct ubifs_inode *ui; |
366 | struct ubifs_dent_node *xent; | 366 | struct ubifs_dent_node *xent; |
367 | union ubifs_key key; | 367 | union ubifs_key key; |
@@ -399,8 +399,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, | |||
399 | if (buf) { | 399 | if (buf) { |
400 | /* If @buf is %NULL we are supposed to return the length */ | 400 | /* If @buf is %NULL we are supposed to return the length */ |
401 | if (ui->data_len > size) { | 401 | if (ui->data_len > size) { |
402 | dbg_err("buffer size %zd, xattr len %d", | 402 | ubifs_err("buffer size %zd, xattr len %d", |
403 | size, ui->data_len); | 403 | size, ui->data_len); |
404 | err = -ERANGE; | 404 | err = -ERANGE; |
405 | goto out_iput; | 405 | goto out_iput; |
406 | } | 406 | } |
@@ -524,7 +524,7 @@ int ubifs_removexattr(struct dentry *dentry, const char *name) | |||
524 | { | 524 | { |
525 | struct inode *inode, *host = dentry->d_inode; | 525 | struct inode *inode, *host = dentry->d_inode; |
526 | struct ubifs_info *c = host->i_sb->s_fs_info; | 526 | struct ubifs_info *c = host->i_sb->s_fs_info; |
527 | struct qstr nm = { .name = name, .len = strlen(name) }; | 527 | struct qstr nm = QSTR_INIT(name, strlen(name)); |
528 | struct ubifs_dent_node *xent; | 528 | struct ubifs_dent_node *xent; |
529 | union ubifs_key key; | 529 | union ubifs_key key; |
530 | int err; | 530 | int err; |
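
The xattr.c changes, and the udf/ufs ones that follow, replace open-coded struct qstr initializers with the QSTR_INIT() helper from <linux/dcache.h>, presumably so the structure's layout can change without touching every initializer. The resulting value is the same; for example:

	/* open-coded initializer being replaced */
	struct qstr nm = { .name = name, .len = strlen(name) };

	/* equivalent form using the helper */
	struct qstr nm = QSTR_INIT(name, strlen(name));
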
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 38de8f234b94..a165c66e3eef 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
@@ -1193,7 +1193,7 @@ static struct dentry *udf_get_parent(struct dentry *child) | |||
1193 | { | 1193 | { |
1194 | struct kernel_lb_addr tloc; | 1194 | struct kernel_lb_addr tloc; |
1195 | struct inode *inode = NULL; | 1195 | struct inode *inode = NULL; |
1196 | struct qstr dotdot = {.name = "..", .len = 2}; | 1196 | struct qstr dotdot = QSTR_INIT("..", 2); |
1197 | struct fileIdentDesc cfi; | 1197 | struct fileIdentDesc cfi; |
1198 | struct udf_fileident_bh fibh; | 1198 | struct udf_fileident_bh fibh; |
1199 | 1199 | ||
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index ac8e279eccc6..302f340d0071 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -146,10 +146,7 @@ static struct dentry *ufs_fh_to_parent(struct super_block *sb, struct fid *fid, | |||
146 | 146 | ||
147 | static struct dentry *ufs_get_parent(struct dentry *child) | 147 | static struct dentry *ufs_get_parent(struct dentry *child) |
148 | { | 148 | { |
149 | struct qstr dot_dot = { | 149 | struct qstr dot_dot = QSTR_INIT("..", 2); |
150 | .name = "..", | ||
151 | .len = 2, | ||
152 | }; | ||
153 | ino_t ino; | 150 | ino_t ino; |
154 | 151 | ||
155 | ino = ufs_inode_by_name(child->d_inode, &dot_dot); | 152 | ino = ufs_inode_by_name(child->d_inode, &dot_dot); |