Diffstat (limited to 'fs/ntfs')
-rw-r--r--  fs/ntfs/ChangeLog  |  15
-rw-r--r--  fs/ntfs/aops.c     | 122
-rw-r--r--  fs/ntfs/bitmap.c   |   5
-rw-r--r--  fs/ntfs/inode.c    |   9
-rw-r--r--  fs/ntfs/layout.h   |   8
-rw-r--r--  fs/ntfs/lcnalloc.c |  31
-rw-r--r--  fs/ntfs/lcnalloc.h |  27
-rw-r--r--  fs/ntfs/logfile.c  |  30
-rw-r--r--  fs/ntfs/logfile.h  |   2
-rw-r--r--  fs/ntfs/malloc.h   |   4
-rw-r--r--  fs/ntfs/mft.c      |   5
-rw-r--r--  fs/ntfs/runlist.c  | 169
-rw-r--r--  fs/ntfs/unistr.c   |   2
13 files changed, 257 insertions, 172 deletions
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 49eafbdb15c1..de58579a1d0e 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -29,7 +29,8 @@ ToDo/Notes: | |||
29 | The Windows boot will run chkdsk and then reboot. The user can then | 29 | The Windows boot will run chkdsk and then reboot. The user can then |
30 | immediately boot into Linux rather than having to do a full Windows | 30 | immediately boot into Linux rather than having to do a full Windows |
31 | boot first before rebooting into Linux and we will recognize such a | 31 | boot first before rebooting into Linux and we will recognize such a |
32 | journal and empty it as it is clean by definition. | 32 | journal and empty it as it is clean by definition. Note, this only |
33 | works if chkdsk left the journal in an obviously clean state. | ||
33 | - Support journals ($LogFile) with only one restart page as well as | 34 | - Support journals ($LogFile) with only one restart page as well as |
34 | journals with two different restart pages. We sanity check both and | 35 | journals with two different restart pages. We sanity check both and |
35 | either use the only sane one or the more recent one of the two in the | 36 | either use the only sane one or the more recent one of the two in the |
@@ -92,6 +93,18 @@ ToDo/Notes: | |||
92 | an octal number to conform to how chmod(1) works, too. Thanks to | 93 | an octal number to conform to how chmod(1) works, too. Thanks to |
93 | Giuseppe Bilotta and Horst von Brand for pointing out the errors of | 94 | Giuseppe Bilotta and Horst von Brand for pointing out the errors of |
94 | my ways. | 95 | my ways. |
96 | - Fix various bugs in the runlist merging code. (Based on libntfs | ||
97 | changes by Richard Russon.) | ||
98 | - Fix sparse warnings that have crept in over time. | ||
99 | - Change ntfs_cluster_free() to require a write locked runlist on entry | ||
100 | since we otherwise get into a lock reversal deadlock if a read locked | ||
101 | runlist is passed in. In the process also change it to take an ntfs | ||
102 | inode instead of a vfs inode as parameter. | ||
103 | - Fix the definition of the CHKD ntfs record magic. It had an off by | ||
104 | two error causing it to be CHKB instead of CHKD. | ||
105 | - Fix a stupid bug in __ntfs_bitmap_set_bits_in_run() which caused the | ||
106 | count to become negative and hence we had a wild memset() scribbling | ||
107 | all over the system's ram. | ||
95 | 108 | ||
96 | 2.1.23 - Implement extension of resident files and make writing safe as well as | 109 | 2.1.23 - Implement extension of resident files and make writing safe as well as |
97 | many bug fixes, cleanups, and enhancements... | 110 | many bug fixes, cleanups, and enhancements... |
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index b6cc8cf24626..5e80c07c6a4d 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -59,39 +59,49 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
59 | unsigned long flags; | 59 | unsigned long flags; |
60 | struct buffer_head *first, *tmp; | 60 | struct buffer_head *first, *tmp; |
61 | struct page *page; | 61 | struct page *page; |
62 | struct inode *vi; | ||
62 | ntfs_inode *ni; | 63 | ntfs_inode *ni; |
63 | int page_uptodate = 1; | 64 | int page_uptodate = 1; |
64 | 65 | ||
65 | page = bh->b_page; | 66 | page = bh->b_page; |
66 | ni = NTFS_I(page->mapping->host); | 67 | vi = page->mapping->host; |
68 | ni = NTFS_I(vi); | ||
67 | 69 | ||
68 | if (likely(uptodate)) { | 70 | if (likely(uptodate)) { |
69 | s64 file_ofs, initialized_size; | 71 | loff_t i_size; |
72 | s64 file_ofs, init_size; | ||
70 | 73 | ||
71 | set_buffer_uptodate(bh); | 74 | set_buffer_uptodate(bh); |
72 | 75 | ||
73 | file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + | 76 | file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + |
74 | bh_offset(bh); | 77 | bh_offset(bh); |
75 | read_lock_irqsave(&ni->size_lock, flags); | 78 | read_lock_irqsave(&ni->size_lock, flags); |
76 | initialized_size = ni->initialized_size; | 79 | init_size = ni->initialized_size; |
80 | i_size = i_size_read(vi); | ||
77 | read_unlock_irqrestore(&ni->size_lock, flags); | 81 | read_unlock_irqrestore(&ni->size_lock, flags); |
82 | if (unlikely(init_size > i_size)) { | ||
83 | /* Race with shrinking truncate. */ | ||
84 | init_size = i_size; | ||
85 | } | ||
78 | /* Check for the current buffer head overflowing. */ | 86 | /* Check for the current buffer head overflowing. */ |
79 | if (file_ofs + bh->b_size > initialized_size) { | 87 | if (unlikely(file_ofs + bh->b_size > init_size)) { |
80 | char *addr; | 88 | u8 *kaddr; |
81 | int ofs = 0; | 89 | int ofs; |
82 | 90 | ||
83 | if (file_ofs < initialized_size) | 91 | ofs = 0; |
84 | ofs = initialized_size - file_ofs; | 92 | if (file_ofs < init_size) |
85 | addr = kmap_atomic(page, KM_BIO_SRC_IRQ); | 93 | ofs = init_size - file_ofs; |
86 | memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); | 94 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); |
95 | memset(kaddr + bh_offset(bh) + ofs, 0, | ||
96 | bh->b_size - ofs); | ||
97 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | ||
87 | flush_dcache_page(page); | 98 | flush_dcache_page(page); |
88 | kunmap_atomic(addr, KM_BIO_SRC_IRQ); | ||
89 | } | 99 | } |
90 | } else { | 100 | } else { |
91 | clear_buffer_uptodate(bh); | 101 | clear_buffer_uptodate(bh); |
92 | SetPageError(page); | 102 | SetPageError(page); |
93 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.", | 103 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block " |
94 | (unsigned long long)bh->b_blocknr); | 104 | "0x%llx.", (unsigned long long)bh->b_blocknr); |
95 | } | 105 | } |
96 | first = page_buffers(page); | 106 | first = page_buffers(page); |
97 | local_irq_save(flags); | 107 | local_irq_save(flags); |
@@ -124,7 +134,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
124 | if (likely(page_uptodate && !PageError(page))) | 134 | if (likely(page_uptodate && !PageError(page))) |
125 | SetPageUptodate(page); | 135 | SetPageUptodate(page); |
126 | } else { | 136 | } else { |
127 | char *addr; | 137 | u8 *kaddr; |
128 | unsigned int i, recs; | 138 | unsigned int i, recs; |
129 | u32 rec_size; | 139 | u32 rec_size; |
130 | 140 | ||
@@ -132,12 +142,12 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
132 | recs = PAGE_CACHE_SIZE / rec_size; | 142 | recs = PAGE_CACHE_SIZE / rec_size; |
133 | /* Should have been verified before we got here... */ | 143 | /* Should have been verified before we got here... */ |
134 | BUG_ON(!recs); | 144 | BUG_ON(!recs); |
135 | addr = kmap_atomic(page, KM_BIO_SRC_IRQ); | 145 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); |
136 | for (i = 0; i < recs; i++) | 146 | for (i = 0; i < recs; i++) |
137 | post_read_mst_fixup((NTFS_RECORD*)(addr + | 147 | post_read_mst_fixup((NTFS_RECORD*)(kaddr + |
138 | i * rec_size), rec_size); | 148 | i * rec_size), rec_size); |
149 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | ||
139 | flush_dcache_page(page); | 150 | flush_dcache_page(page); |
140 | kunmap_atomic(addr, KM_BIO_SRC_IRQ); | ||
141 | if (likely(page_uptodate && !PageError(page))) | 151 | if (likely(page_uptodate && !PageError(page))) |
142 | SetPageUptodate(page); | 152 | SetPageUptodate(page); |
143 | } | 153 | } |
@@ -168,8 +178,11 @@ still_busy: | |||
168 | */ | 178 | */ |
169 | static int ntfs_read_block(struct page *page) | 179 | static int ntfs_read_block(struct page *page) |
170 | { | 180 | { |
181 | loff_t i_size; | ||
171 | VCN vcn; | 182 | VCN vcn; |
172 | LCN lcn; | 183 | LCN lcn; |
184 | s64 init_size; | ||
185 | struct inode *vi; | ||
173 | ntfs_inode *ni; | 186 | ntfs_inode *ni; |
174 | ntfs_volume *vol; | 187 | ntfs_volume *vol; |
175 | runlist_element *rl; | 188 | runlist_element *rl; |
@@ -180,7 +193,8 @@ static int ntfs_read_block(struct page *page) | |||
180 | int i, nr; | 193 | int i, nr; |
181 | unsigned char blocksize_bits; | 194 | unsigned char blocksize_bits; |
182 | 195 | ||
183 | ni = NTFS_I(page->mapping->host); | 196 | vi = page->mapping->host; |
197 | ni = NTFS_I(vi); | ||
184 | vol = ni->vol; | 198 | vol = ni->vol; |
185 | 199 | ||
186 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ | 200 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ |
@@ -199,11 +213,28 @@ static int ntfs_read_block(struct page *page) | |||
199 | bh = head = page_buffers(page); | 213 | bh = head = page_buffers(page); |
200 | BUG_ON(!bh); | 214 | BUG_ON(!bh); |
201 | 215 | ||
216 | /* | ||
217 | * We may be racing with truncate. To avoid some of the problems we | ||
218 | * now take a snapshot of the various sizes and use those for the whole | ||
219 | * of the function. In case of an extending truncate it just means we | ||
220 | * may leave some buffers unmapped which are now allocated. This is | ||
221 | * not a problem since these buffers will just get mapped when a write | ||
222 | * occurs. In case of a shrinking truncate, we will detect this later | ||
223 | * on due to the runlist being incomplete and if the page is being | ||
224 | * fully truncated, truncate will throw it away as soon as we unlock | ||
225 | * it so no need to worry what we do with it. | ||
226 | */ | ||
202 | iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); | 227 | iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); |
203 | read_lock_irqsave(&ni->size_lock, flags); | 228 | read_lock_irqsave(&ni->size_lock, flags); |
204 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; | 229 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; |
205 | zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits; | 230 | init_size = ni->initialized_size; |
231 | i_size = i_size_read(vi); | ||
206 | read_unlock_irqrestore(&ni->size_lock, flags); | 232 | read_unlock_irqrestore(&ni->size_lock, flags); |
233 | if (unlikely(init_size > i_size)) { | ||
234 | /* Race with shrinking truncate. */ | ||
235 | init_size = i_size; | ||
236 | } | ||
237 | zblock = (init_size + blocksize - 1) >> blocksize_bits; | ||
207 | 238 | ||
208 | /* Loop through all the buffers in the page. */ | 239 | /* Loop through all the buffers in the page. */ |
209 | rl = NULL; | 240 | rl = NULL; |
@@ -366,6 +397,8 @@ handle_zblock: | |||
366 | */ | 397 | */ |
367 | static int ntfs_readpage(struct file *file, struct page *page) | 398 | static int ntfs_readpage(struct file *file, struct page *page) |
368 | { | 399 | { |
400 | loff_t i_size; | ||
401 | struct inode *vi; | ||
369 | ntfs_inode *ni, *base_ni; | 402 | ntfs_inode *ni, *base_ni; |
370 | u8 *kaddr; | 403 | u8 *kaddr; |
371 | ntfs_attr_search_ctx *ctx; | 404 | ntfs_attr_search_ctx *ctx; |
@@ -384,14 +417,17 @@ retry_readpage: | |||
384 | unlock_page(page); | 417 | unlock_page(page); |
385 | return 0; | 418 | return 0; |
386 | } | 419 | } |
387 | ni = NTFS_I(page->mapping->host); | 420 | vi = page->mapping->host; |
421 | ni = NTFS_I(vi); | ||
388 | /* | 422 | /* |
389 | * Only $DATA attributes can be encrypted and only unnamed $DATA | 423 | * Only $DATA attributes can be encrypted and only unnamed $DATA |
390 | * attributes can be compressed. Index root can have the flags set but | 424 | * attributes can be compressed. Index root can have the flags set but |
391 | * this means to create compressed/encrypted files, not that the | 425 | * this means to create compressed/encrypted files, not that the |
392 | * attribute is compressed/encrypted. | 426 | * attribute is compressed/encrypted. Note we need to check for |
427 | * AT_INDEX_ALLOCATION since this is the type of both directory and | ||
428 | * index inodes. | ||
393 | */ | 429 | */ |
394 | if (ni->type != AT_INDEX_ROOT) { | 430 | if (ni->type != AT_INDEX_ALLOCATION) { |
395 | /* If attribute is encrypted, deny access, just like NT4. */ | 431 | /* If attribute is encrypted, deny access, just like NT4. */ |
396 | if (NInoEncrypted(ni)) { | 432 | if (NInoEncrypted(ni)) { |
397 | BUG_ON(ni->type != AT_DATA); | 433 | BUG_ON(ni->type != AT_DATA); |
@@ -456,7 +492,12 @@ retry_readpage: | |||
456 | read_lock_irqsave(&ni->size_lock, flags); | 492 | read_lock_irqsave(&ni->size_lock, flags); |
457 | if (unlikely(attr_len > ni->initialized_size)) | 493 | if (unlikely(attr_len > ni->initialized_size)) |
458 | attr_len = ni->initialized_size; | 494 | attr_len = ni->initialized_size; |
495 | i_size = i_size_read(vi); | ||
459 | read_unlock_irqrestore(&ni->size_lock, flags); | 496 | read_unlock_irqrestore(&ni->size_lock, flags); |
497 | if (unlikely(attr_len > i_size)) { | ||
498 | /* Race with shrinking truncate. */ | ||
499 | attr_len = i_size; | ||
500 | } | ||
460 | kaddr = kmap_atomic(page, KM_USER0); | 501 | kaddr = kmap_atomic(page, KM_USER0); |
461 | /* Copy the data to the page. */ | 502 | /* Copy the data to the page. */ |
462 | memcpy(kaddr, (u8*)ctx->attr + | 503 | memcpy(kaddr, (u8*)ctx->attr + |
@@ -1341,9 +1382,11 @@ retry_writepage: | |||
1341 | * Only $DATA attributes can be encrypted and only unnamed $DATA | 1382 | * Only $DATA attributes can be encrypted and only unnamed $DATA |
1342 | * attributes can be compressed. Index root can have the flags set but | 1383 | * attributes can be compressed. Index root can have the flags set but |
1343 | * this means to create compressed/encrypted files, not that the | 1384 | * this means to create compressed/encrypted files, not that the |
1344 | * attribute is compressed/encrypted. | 1385 | * attribute is compressed/encrypted. Note we need to check for |
1386 | * AT_INDEX_ALLOCATION since this is the type of both directory and | ||
1387 | * index inodes. | ||
1345 | */ | 1388 | */ |
1346 | if (ni->type != AT_INDEX_ROOT) { | 1389 | if (ni->type != AT_INDEX_ALLOCATION) { |
1347 | /* If file is encrypted, deny access, just like NT4. */ | 1390 | /* If file is encrypted, deny access, just like NT4. */ |
1348 | if (NInoEncrypted(ni)) { | 1391 | if (NInoEncrypted(ni)) { |
1349 | unlock_page(page); | 1392 | unlock_page(page); |
@@ -1379,8 +1422,8 @@ retry_writepage: | |||
1379 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; | 1422 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; |
1380 | kaddr = kmap_atomic(page, KM_USER0); | 1423 | kaddr = kmap_atomic(page, KM_USER0); |
1381 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); | 1424 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); |
1382 | flush_dcache_page(page); | ||
1383 | kunmap_atomic(kaddr, KM_USER0); | 1425 | kunmap_atomic(kaddr, KM_USER0); |
1426 | flush_dcache_page(page); | ||
1384 | } | 1427 | } |
1385 | /* Handle mst protected attributes. */ | 1428 | /* Handle mst protected attributes. */ |
1386 | if (NInoMstProtected(ni)) | 1429 | if (NInoMstProtected(ni)) |
@@ -1443,34 +1486,33 @@ retry_writepage: | |||
1443 | BUG_ON(PageWriteback(page)); | 1486 | BUG_ON(PageWriteback(page)); |
1444 | set_page_writeback(page); | 1487 | set_page_writeback(page); |
1445 | unlock_page(page); | 1488 | unlock_page(page); |
1446 | /* | ||
1447 | * Here, we do not need to zero the out of bounds area everytime | ||
1448 | * because the below memcpy() already takes care of the | ||
1449 | * mmap-at-end-of-file requirements. If the file is converted to a | ||
1450 | * non-resident one, then the code path use is switched to the | ||
1451 | * non-resident one where the zeroing happens on each ntfs_writepage() | ||
1452 | * invocation. | ||
1453 | */ | ||
1454 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); | 1489 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); |
1455 | i_size = i_size_read(vi); | 1490 | i_size = i_size_read(vi); |
1456 | if (unlikely(attr_len > i_size)) { | 1491 | if (unlikely(attr_len > i_size)) { |
1492 | /* Race with shrinking truncate or a failed truncate. */ | ||
1457 | attr_len = i_size; | 1493 | attr_len = i_size; |
1458 | ctx->attr->data.resident.value_length = cpu_to_le32(attr_len); | 1494 | /* |
1495 | * If the truncate failed, fix it up now. If a concurrent | ||
1496 | * truncate, we do its job, so it does not have to do anything. | ||
1497 | */ | ||
1498 | err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, | ||
1499 | attr_len); | ||
1500 | /* Shrinking cannot fail. */ | ||
1501 | BUG_ON(err); | ||
1459 | } | 1502 | } |
1460 | kaddr = kmap_atomic(page, KM_USER0); | 1503 | kaddr = kmap_atomic(page, KM_USER0); |
1461 | /* Copy the data from the page to the mft record. */ | 1504 | /* Copy the data from the page to the mft record. */ |
1462 | memcpy((u8*)ctx->attr + | 1505 | memcpy((u8*)ctx->attr + |
1463 | le16_to_cpu(ctx->attr->data.resident.value_offset), | 1506 | le16_to_cpu(ctx->attr->data.resident.value_offset), |
1464 | kaddr, attr_len); | 1507 | kaddr, attr_len); |
1465 | flush_dcache_mft_record_page(ctx->ntfs_ino); | ||
1466 | /* Zero out of bounds area in the page cache page. */ | 1508 | /* Zero out of bounds area in the page cache page. */ |
1467 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); | 1509 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); |
1468 | flush_dcache_page(page); | ||
1469 | kunmap_atomic(kaddr, KM_USER0); | 1510 | kunmap_atomic(kaddr, KM_USER0); |
1470 | 1511 | flush_dcache_mft_record_page(ctx->ntfs_ino); | |
1512 | flush_dcache_page(page); | ||
1513 | /* We are done with the page. */ | ||
1471 | end_page_writeback(page); | 1514 | end_page_writeback(page); |
1472 | 1515 | /* Finally, mark the mft record dirty, so it gets written back. */ | |
1473 | /* Mark the mft record dirty, so it gets written back. */ | ||
1474 | mark_mft_record_dirty(ctx->ntfs_ino); | 1516 | mark_mft_record_dirty(ctx->ntfs_ino); |
1475 | ntfs_attr_put_search_ctx(ctx); | 1517 | ntfs_attr_put_search_ctx(ctx); |
1476 | unmap_mft_record(base_ni); | 1518 | unmap_mft_record(base_ni); |
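The aops.c changes above all follow one pattern: snapshot ni->initialized_size and i_size under the size lock, clamp the former to the latter, and only then decide how much of the page to zero or copy, so a concurrent shrinking truncate cannot make the code run past the new end of file. Below is a minimal userspace sketch of that clamp-and-zero step; the function and variable names are made up for illustration, not the kernel's.

    /* Sketch of the clamp-and-zero logic added to ntfs_end_buffer_async_read(). */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Zero the part of a buffer that lies beyond the initialized size. */
    static void zero_past_init(uint8_t *buf, int64_t buf_ofs, size_t buf_size,
                               int64_t init_size, int64_t i_size)
    {
        /* Race with shrinking truncate: never trust init_size > i_size. */
        if (init_size > i_size)
            init_size = i_size;
        if (buf_ofs + (int64_t)buf_size > init_size) {
            size_t ofs = 0;
            if (buf_ofs < init_size)
                ofs = (size_t)(init_size - buf_ofs);
            memset(buf + ofs, 0, buf_size - ofs);
        }
    }

    int main(void)
    {
        uint8_t block[512];
        memset(block, 0xff, sizeof(block));
        /* Buffer at file offset 1024, initialized size 1100, i_size 1050. */
        zero_past_init(block, 1024, sizeof(block), 1100, 1050);
        printf("byte 25 = 0x%02x, byte 26 = 0x%02x\n", block[25], block[26]);
        return 0;
    }

Only the first 26 bytes survive because the snapshotted initialized size is clamped to the (smaller) file size before the zeroing offset is computed.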
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index 12cf2e30c7dd..7a190cdc60e2 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. | 2 | * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Anton Altaparmakov | 4 | * Copyright (c) 2004-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
@@ -90,7 +90,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, | |||
90 | /* If the first byte is partial, modify the appropriate bits in it. */ | 90 | /* If the first byte is partial, modify the appropriate bits in it. */ |
91 | if (bit) { | 91 | if (bit) { |
92 | u8 *byte = kaddr + pos; | 92 | u8 *byte = kaddr + pos; |
93 | while ((bit & 7) && cnt--) { | 93 | while ((bit & 7) && cnt) { |
94 | cnt--; | ||
94 | if (value) | 95 | if (value) |
95 | *byte |= 1 << bit++; | 96 | *byte |= 1 << bit++; |
96 | else | 97 | else |
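The bitmap.c fix is easy to miss: with the post-decrement inside the loop condition, the iteration that finally fails the cnt test still decrements cnt, so the remaining count ends up at -1 instead of 0 and the later whole-byte memset() is handed a huge length. A tiny standalone demo of the difference (not the kernel code, just the loop shape):

    #include <stdio.h>

    static long long drain_buggy(int bit, long long cnt)
    {
        while ((bit & 7) && cnt--)   /* decrements cnt even when it is already 0 */
            bit++;
        return cnt;
    }

    static long long drain_fixed(int bit, long long cnt)
    {
        while ((bit & 7) && cnt) {   /* test first, decrement inside the body */
            cnt--;
            bit++;
        }
        return cnt;
    }

    int main(void)
    {
        /* Start mid-byte with fewer bits to set than the byte has left. */
        printf("buggy: cnt = %lld\n", drain_buggy(3, 2));   /* -1: underflow */
        printf("fixed: cnt = %lld\n", drain_fixed(3, 2));   /*  0 */
        return 0;
    }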
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index dc4bbe3acf5c..7ec045131808 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1166,6 +1166,8 @@ err_out: | |||
1166 | * | 1166 | * |
1167 | * Return 0 on success and -errno on error. In the error case, the inode will | 1167 | * Return 0 on success and -errno on error. In the error case, the inode will |
1168 | * have had make_bad_inode() executed on it. | 1168 | * have had make_bad_inode() executed on it. |
1169 | * | ||
1170 | * Note this cannot be called for AT_INDEX_ALLOCATION. | ||
1169 | */ | 1171 | */ |
1170 | static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | 1172 | static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) |
1171 | { | 1173 | { |
@@ -1242,8 +1244,8 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | |||
1242 | } | 1244 | } |
1243 | } | 1245 | } |
1244 | /* | 1246 | /* |
1245 | * The encryption flag set in an index root just means to | 1247 | * The compressed/sparse flag set in an index root just means |
1246 | * compress all files. | 1248 | * to compress all files. |
1247 | */ | 1249 | */ |
1248 | if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) { | 1250 | if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) { |
1249 | ntfs_error(vi->i_sb, "Found mst protected attribute " | 1251 | ntfs_error(vi->i_sb, "Found mst protected attribute " |
@@ -1319,8 +1321,7 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | |||
1319 | "the mapping pairs array."); | 1321 | "the mapping pairs array."); |
1320 | goto unm_err_out; | 1322 | goto unm_err_out; |
1321 | } | 1323 | } |
1322 | if ((NInoCompressed(ni) || NInoSparse(ni)) && | 1324 | if (NInoCompressed(ni) || NInoSparse(ni)) { |
1323 | ni->type != AT_INDEX_ROOT) { | ||
1324 | if (a->data.non_resident.compression_unit != 4) { | 1325 | if (a->data.non_resident.compression_unit != 4) { |
1325 | ntfs_error(vi->i_sb, "Found nonstandard " | 1326 | ntfs_error(vi->i_sb, "Found nonstandard " |
1326 | "compression unit (%u instead " | 1327 | "compression unit (%u instead " |
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 609ad1728ce4..5c248d404f05 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -123,7 +123,7 @@ enum { | |||
123 | magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */ | 123 | magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */ |
124 | 124 | ||
125 | /* Found in $LogFile/$DATA. (May be found in $MFT/$DATA, also?) */ | 125 | /* Found in $LogFile/$DATA. (May be found in $MFT/$DATA, also?) */ |
126 | magic_CHKD = const_cpu_to_le32(0x424b4843), /* Modified by chkdsk. */ | 126 | magic_CHKD = const_cpu_to_le32(0x444b4843), /* Modified by chkdsk. */ |
127 | 127 | ||
128 | /* Found in all ntfs record containing records. */ | 128 | /* Found in all ntfs record containing records. */ |
129 | magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector | 129 | magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector |
@@ -308,10 +308,8 @@ typedef le16 MFT_RECORD_FLAGS; | |||
308 | * The _LE versions are to be applied on little endian MFT_REFs. | 308 | * The _LE versions are to be applied on little endian MFT_REFs. |
309 | * Note: The _LE versions will return a CPU endian formatted value! | 309 | * Note: The _LE versions will return a CPU endian formatted value! |
310 | */ | 310 | */ |
311 | typedef enum { | 311 | #define MFT_REF_MASK_CPU 0x0000ffffffffffffULL |
312 | MFT_REF_MASK_CPU = 0x0000ffffffffffffULL, | 312 | #define MFT_REF_MASK_LE const_cpu_to_le64(MFT_REF_MASK_CPU) |
313 | MFT_REF_MASK_LE = const_cpu_to_le64(0x0000ffffffffffffULL), | ||
314 | } MFT_REF_CONSTS; | ||
315 | 313 | ||
316 | typedef u64 MFT_REF; | 314 | typedef u64 MFT_REF; |
317 | typedef le64 leMFT_REF; | 315 | typedef le64 leMFT_REF; |
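The layout.h hunk fixes the on-disk magic for chkdsk-modified records: NTFS record magics are four ASCII characters stored little-endian, so "CHKD" must be 0x444b4843, while the old constant 0x424b4843 actually spelled "CHKB". A quick standalone check (illustrative only, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    static void show(uint32_t magic)
    {
        /* Extract bytes in little-endian (on-disk) order, regardless of host. */
        char b[5] = { 0 };
        b[0] = (char)(magic & 0xff);
        b[1] = (char)((magic >> 8) & 0xff);
        b[2] = (char)((magic >> 16) & 0xff);
        b[3] = (char)((magic >> 24) & 0xff);
        printf("0x%08x -> \"%s\"\n", magic, b);
    }

    int main(void)
    {
        show(0x424b4843);   /* old, wrong: "CHKB" */
        show(0x444b4843);   /* fixed:      "CHKD" */
        return 0;
    }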
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 7b5934290685..5af3bf0b7eee 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -779,14 +779,13 @@ out: | |||
779 | 779 | ||
780 | /** | 780 | /** |
781 | * __ntfs_cluster_free - free clusters on an ntfs volume | 781 | * __ntfs_cluster_free - free clusters on an ntfs volume |
782 | * @vi: vfs inode whose runlist describes the clusters to free | 782 | * @ni: ntfs inode whose runlist describes the clusters to free |
783 | * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters | 783 | * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters |
784 | * @count: number of clusters to free or -1 for all clusters | 784 | * @count: number of clusters to free or -1 for all clusters |
785 | * @write_locked: true if the runlist is locked for writing | ||
786 | * @is_rollback: true if this is a rollback operation | 785 | * @is_rollback: true if this is a rollback operation |
787 | * | 786 | * |
788 | * Free @count clusters starting at the cluster @start_vcn in the runlist | 787 | * Free @count clusters starting at the cluster @start_vcn in the runlist |
789 | * described by the vfs inode @vi. | 788 | * described by the vfs inode @ni. |
790 | * | 789 | * |
791 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are | 790 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are |
792 | * deallocated. Thus, to completely free all clusters in a runlist, use | 791 | * deallocated. Thus, to completely free all clusters in a runlist, use |
@@ -801,31 +800,28 @@ out: | |||
801 | * Return the number of deallocated clusters (not counting sparse ones) on | 800 | * Return the number of deallocated clusters (not counting sparse ones) on |
802 | * success and -errno on error. | 801 | * success and -errno on error. |
803 | * | 802 | * |
804 | * Locking: - The runlist described by @vi must be locked on entry and is | 803 | * Locking: - The runlist described by @ni must be locked for writing on entry |
805 | * locked on return. Note if the runlist is locked for reading the | 804 | * and is locked on return. Note the runlist may be modified when |
806 | * lock may be dropped and reacquired. Note the runlist may be | 805 | * needed runlist fragments need to be mapped. |
807 | * modified when needed runlist fragments need to be mapped. | ||
808 | * - The volume lcn bitmap must be unlocked on entry and is unlocked | 806 | * - The volume lcn bitmap must be unlocked on entry and is unlocked |
809 | * on return. | 807 | * on return. |
810 | * - This function takes the volume lcn bitmap lock for writing and | 808 | * - This function takes the volume lcn bitmap lock for writing and |
811 | * modifies the bitmap contents. | 809 | * modifies the bitmap contents. |
812 | */ | 810 | */ |
813 | s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, | 811 | s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count, |
814 | const BOOL write_locked, const BOOL is_rollback) | 812 | const BOOL is_rollback) |
815 | { | 813 | { |
816 | s64 delta, to_free, total_freed, real_freed; | 814 | s64 delta, to_free, total_freed, real_freed; |
817 | ntfs_inode *ni; | ||
818 | ntfs_volume *vol; | 815 | ntfs_volume *vol; |
819 | struct inode *lcnbmp_vi; | 816 | struct inode *lcnbmp_vi; |
820 | runlist_element *rl; | 817 | runlist_element *rl; |
821 | int err; | 818 | int err; |
822 | 819 | ||
823 | BUG_ON(!vi); | 820 | BUG_ON(!ni); |
824 | ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " | 821 | ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " |
825 | "0x%llx.%s", vi->i_ino, (unsigned long long)start_vcn, | 822 | "0x%llx.%s", ni->mft_no, (unsigned long long)start_vcn, |
826 | (unsigned long long)count, | 823 | (unsigned long long)count, |
827 | is_rollback ? " (rollback)" : ""); | 824 | is_rollback ? " (rollback)" : ""); |
828 | ni = NTFS_I(vi); | ||
829 | vol = ni->vol; | 825 | vol = ni->vol; |
830 | lcnbmp_vi = vol->lcnbmp_ino; | 826 | lcnbmp_vi = vol->lcnbmp_ino; |
831 | BUG_ON(!lcnbmp_vi); | 827 | BUG_ON(!lcnbmp_vi); |
@@ -843,7 +839,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, | |||
843 | 839 | ||
844 | total_freed = real_freed = 0; | 840 | total_freed = real_freed = 0; |
845 | 841 | ||
846 | rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, write_locked); | 842 | rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, TRUE); |
847 | if (IS_ERR(rl)) { | 843 | if (IS_ERR(rl)) { |
848 | if (!is_rollback) | 844 | if (!is_rollback) |
849 | ntfs_error(vol->sb, "Failed to find first runlist " | 845 | ntfs_error(vol->sb, "Failed to find first runlist " |
@@ -897,7 +893,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, | |||
897 | 893 | ||
898 | /* Attempt to map runlist. */ | 894 | /* Attempt to map runlist. */ |
899 | vcn = rl->vcn; | 895 | vcn = rl->vcn; |
900 | rl = ntfs_attr_find_vcn_nolock(ni, vcn, write_locked); | 896 | rl = ntfs_attr_find_vcn_nolock(ni, vcn, TRUE); |
901 | if (IS_ERR(rl)) { | 897 | if (IS_ERR(rl)) { |
902 | err = PTR_ERR(rl); | 898 | err = PTR_ERR(rl); |
903 | if (!is_rollback) | 899 | if (!is_rollback) |
@@ -965,8 +961,7 @@ err_out: | |||
965 | * If rollback fails, set the volume errors flag, emit an error | 961 | * If rollback fails, set the volume errors flag, emit an error |
966 | * message, and return the error code. | 962 | * message, and return the error code. |
967 | */ | 963 | */ |
968 | delta = __ntfs_cluster_free(vi, start_vcn, total_freed, write_locked, | 964 | delta = __ntfs_cluster_free(ni, start_vcn, total_freed, TRUE); |
969 | TRUE); | ||
970 | if (delta < 0) { | 965 | if (delta < 0) { |
971 | ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " | 966 | ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " |
972 | "inconsistent metadata! Unmount and run " | 967 | "inconsistent metadata! Unmount and run " |
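The lcnalloc.c change removes the @write_locked parameter and simply requires the caller to hold the runlist lock for writing, so __ntfs_cluster_free() never has to drop a read lock and re-take it for writing while it is also juggling the lcn bitmap lock. Below is a userspace pthreads analogy of the resulting fixed lock order; this is only a model, not kernel code, and the lock and function names are invented (the real locks are the inode's runlist lock and the volume lcn bitmap lock).

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t runlist_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t  lcnbmp_lock  = PTHREAD_MUTEX_INITIALIZER;

    /* Model of the fixed ntfs_cluster_free(): the caller already holds the
     * runlist lock for writing; only the bitmap lock is taken in here. */
    static void cluster_free_locked(void)
    {
        pthread_mutex_lock(&lcnbmp_lock);
        /* ... clear bits in the lcn bitmap, remap runlist fragments ... */
        pthread_mutex_unlock(&lcnbmp_lock);
    }

    int main(void)
    {
        /* Caller side: write-lock the runlist first, then free clusters. */
        pthread_rwlock_wrlock(&runlist_lock);
        cluster_free_locked();
        pthread_rwlock_unlock(&runlist_lock);
        puts("runlist lock -> lcn bitmap lock: one fixed order, no reversal");
        return 0;
    }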
diff --git a/fs/ntfs/lcnalloc.h b/fs/ntfs/lcnalloc.h
index e4d7fb98d685..a6a8827882e7 100644
--- a/fs/ntfs/lcnalloc.h
+++ b/fs/ntfs/lcnalloc.h
@@ -2,7 +2,7 @@ | |||
2 | * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation. Part of the | 2 | * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation. Part of the |
3 | * Linux-NTFS project. | 3 | * Linux-NTFS project. |
4 | * | 4 | * |
5 | * Copyright (c) 2004 Anton Altaparmakov | 5 | * Copyright (c) 2004-2005 Anton Altaparmakov |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License as published | 8 | * modify it under the terms of the GNU General Public License as published |
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | 29 | ||
30 | #include "types.h" | 30 | #include "types.h" |
31 | #include "inode.h" | ||
31 | #include "runlist.h" | 32 | #include "runlist.h" |
32 | #include "volume.h" | 33 | #include "volume.h" |
33 | 34 | ||
@@ -42,18 +43,17 @@ extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, | |||
42 | const VCN start_vcn, const s64 count, const LCN start_lcn, | 43 | const VCN start_vcn, const s64 count, const LCN start_lcn, |
43 | const NTFS_CLUSTER_ALLOCATION_ZONES zone); | 44 | const NTFS_CLUSTER_ALLOCATION_ZONES zone); |
44 | 45 | ||
45 | extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, | 46 | extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, |
46 | s64 count, const BOOL write_locked, const BOOL is_rollback); | 47 | s64 count, const BOOL is_rollback); |
47 | 48 | ||
48 | /** | 49 | /** |
49 | * ntfs_cluster_free - free clusters on an ntfs volume | 50 | * ntfs_cluster_free - free clusters on an ntfs volume |
50 | * @vi: vfs inode whose runlist describes the clusters to free | 51 | * @ni: ntfs inode whose runlist describes the clusters to free |
51 | * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters | 52 | * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters |
52 | * @count: number of clusters to free or -1 for all clusters | 53 | * @count: number of clusters to free or -1 for all clusters |
53 | * @write_locked: true if the runlist is locked for writing | ||
54 | * | 54 | * |
55 | * Free @count clusters starting at the cluster @start_vcn in the runlist | 55 | * Free @count clusters starting at the cluster @start_vcn in the runlist |
56 | * described by the vfs inode @vi. | 56 | * described by the ntfs inode @ni. |
57 | * | 57 | * |
58 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are | 58 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are |
59 | * deallocated. Thus, to completely free all clusters in a runlist, use | 59 | * deallocated. Thus, to completely free all clusters in a runlist, use |
@@ -65,19 +65,18 @@ extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, | |||
65 | * Return the number of deallocated clusters (not counting sparse ones) on | 65 | * Return the number of deallocated clusters (not counting sparse ones) on |
66 | * success and -errno on error. | 66 | * success and -errno on error. |
67 | * | 67 | * |
68 | * Locking: - The runlist described by @vi must be locked on entry and is | 68 | * Locking: - The runlist described by @ni must be locked for writing on entry |
69 | * locked on return. Note if the runlist is locked for reading the | 69 | * and is locked on return. Note the runlist may be modified when |
70 | * lock may be dropped and reacquired. Note the runlist may be | 70 | * needed runlist fragments need to be mapped. |
71 | * modified when needed runlist fragments need to be mapped. | ||
72 | * - The volume lcn bitmap must be unlocked on entry and is unlocked | 71 | * - The volume lcn bitmap must be unlocked on entry and is unlocked |
73 | * on return. | 72 | * on return. |
74 | * - This function takes the volume lcn bitmap lock for writing and | 73 | * - This function takes the volume lcn bitmap lock for writing and |
75 | * modifies the bitmap contents. | 74 | * modifies the bitmap contents. |
76 | */ | 75 | */ |
77 | static inline s64 ntfs_cluster_free(struct inode *vi, const VCN start_vcn, | 76 | static inline s64 ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, |
78 | s64 count, const BOOL write_locked) | 77 | s64 count) |
79 | { | 78 | { |
80 | return __ntfs_cluster_free(vi, start_vcn, count, write_locked, FALSE); | 79 | return __ntfs_cluster_free(ni, start_vcn, count, FALSE); |
81 | } | 80 | } |
82 | 81 | ||
83 | extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, | 82 | extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, |
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 0173e95500d9..0fd70295cca6 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -51,7 +51,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
51 | RESTART_PAGE_HEADER *rp, s64 pos) | 51 | RESTART_PAGE_HEADER *rp, s64 pos) |
52 | { | 52 | { |
53 | u32 logfile_system_page_size, logfile_log_page_size; | 53 | u32 logfile_system_page_size, logfile_log_page_size; |
54 | u16 usa_count, usa_ofs, usa_end, ra_ofs; | 54 | u16 ra_ofs, usa_count, usa_ofs, usa_end = 0; |
55 | BOOL have_usa = TRUE; | ||
55 | 56 | ||
56 | ntfs_debug("Entering."); | 57 | ntfs_debug("Entering."); |
57 | /* | 58 | /* |
@@ -86,6 +87,14 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
86 | (int)sle16_to_cpu(rp->minor_ver)); | 87 | (int)sle16_to_cpu(rp->minor_ver)); |
87 | return FALSE; | 88 | return FALSE; |
88 | } | 89 | } |
90 | /* | ||
91 | * If chkdsk has been run the restart page may not be protected by an | ||
92 | * update sequence array. | ||
93 | */ | ||
94 | if (ntfs_is_chkd_record(rp->magic) && !le16_to_cpu(rp->usa_count)) { | ||
95 | have_usa = FALSE; | ||
96 | goto skip_usa_checks; | ||
97 | } | ||
89 | /* Verify the size of the update sequence array. */ | 98 | /* Verify the size of the update sequence array. */ |
90 | usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS); | 99 | usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS); |
91 | if (usa_count != le16_to_cpu(rp->usa_count)) { | 100 | if (usa_count != le16_to_cpu(rp->usa_count)) { |
@@ -102,6 +111,7 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
102 | "inconsistent update sequence array offset."); | 111 | "inconsistent update sequence array offset."); |
103 | return FALSE; | 112 | return FALSE; |
104 | } | 113 | } |
114 | skip_usa_checks: | ||
105 | /* | 115 | /* |
106 | * Verify the position of the restart area. It must be: | 116 | * Verify the position of the restart area. It must be: |
107 | * - aligned to 8-byte boundary, | 117 | * - aligned to 8-byte boundary, |
@@ -109,7 +119,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
109 | * - within the system page size. | 119 | * - within the system page size. |
110 | */ | 120 | */ |
111 | ra_ofs = le16_to_cpu(rp->restart_area_offset); | 121 | ra_ofs = le16_to_cpu(rp->restart_area_offset); |
112 | if (ra_ofs & 7 || ra_ofs < usa_end || | 122 | if (ra_ofs & 7 || (have_usa ? ra_ofs < usa_end : |
123 | ra_ofs < sizeof(RESTART_PAGE_HEADER)) || | ||
113 | ra_ofs > logfile_system_page_size) { | 124 | ra_ofs > logfile_system_page_size) { |
114 | ntfs_error(vi->i_sb, "$LogFile restart page specifies " | 125 | ntfs_error(vi->i_sb, "$LogFile restart page specifies " |
115 | "inconsistent restart area offset."); | 126 | "inconsistent restart area offset."); |
@@ -402,8 +413,12 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, | |||
402 | idx++; | 413 | idx++; |
403 | } while (to_read > 0); | 414 | } while (to_read > 0); |
404 | } | 415 | } |
405 | /* Perform the multi sector transfer deprotection on the buffer. */ | 416 | /* |
406 | if (post_read_mst_fixup((NTFS_RECORD*)trp, | 417 | * Perform the multi sector transfer deprotection on the buffer if the |
418 | * restart page is protected. | ||
419 | */ | ||
420 | if ((!ntfs_is_chkd_record(trp->magic) || le16_to_cpu(trp->usa_count)) | ||
421 | && post_read_mst_fixup((NTFS_RECORD*)trp, | ||
407 | le32_to_cpu(rp->system_page_size))) { | 422 | le32_to_cpu(rp->system_page_size))) { |
408 | /* | 423 | /* |
409 | * A multi sector tranfer error was detected. We only need to | 424 | * A multi sector tranfer error was detected. We only need to |
@@ -615,11 +630,16 @@ is_empty: | |||
615 | * Otherwise just throw it away. | 630 | * Otherwise just throw it away. |
616 | */ | 631 | */ |
617 | if (rstr2_lsn > rstr1_lsn) { | 632 | if (rstr2_lsn > rstr1_lsn) { |
633 | ntfs_debug("Using second restart page as it is more " | ||
634 | "recent."); | ||
618 | ntfs_free(rstr1_ph); | 635 | ntfs_free(rstr1_ph); |
619 | rstr1_ph = rstr2_ph; | 636 | rstr1_ph = rstr2_ph; |
620 | /* rstr1_lsn = rstr2_lsn; */ | 637 | /* rstr1_lsn = rstr2_lsn; */ |
621 | } else | 638 | } else { |
639 | ntfs_debug("Using first restart page as it is more " | ||
640 | "recent."); | ||
622 | ntfs_free(rstr2_ph); | 641 | ntfs_free(rstr2_ph); |
642 | } | ||
623 | rstr2_ph = NULL; | 643 | rstr2_ph = NULL; |
624 | } | 644 | } |
625 | /* All consistency checks passed. */ | 645 | /* All consistency checks passed. */ |
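The logfile.c changes teach the restart-page checks that a page rewritten by chkdsk may carry no update sequence array at all (usa_count == 0), in which case the USA offset/size checks and the multi sector transfer deprotection must be skipped. A simplified, self-contained sketch of that decision follows; the cut-down struct and hand-rolled magic constants are assumptions for illustration, not the kernel's types.

    #include <stdio.h>
    #include <stdint.h>

    struct restart_page {            /* hypothetical cut-down header */
        uint32_t magic;              /* little-endian on-disk magic   */
        uint16_t usa_count;          /* 0 if chkdsk stripped the USA  */
    };

    #define MAGIC_CHKD 0x444b4843u   /* "CHKD" */
    #define MAGIC_RSTR 0x52545352u   /* "RSTR" */

    /* A CHKD page with no update sequence array is not MST protected. */
    static int page_has_usa(const struct restart_page *rp)
    {
        return !(rp->magic == MAGIC_CHKD && rp->usa_count == 0);
    }

    int main(void)
    {
        struct restart_page chkd = { MAGIC_CHKD, 0 };
        struct restart_page rstr = { MAGIC_RSTR, 9 };
        printf("CHKD, usa_count 0: run MST fixup? %s\n",
               page_has_usa(&chkd) ? "yes" : "no");
        printf("RSTR, usa_count 9: run MST fixup? %s\n",
               page_has_usa(&rstr) ? "yes" : "no");
        return 0;
    }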
diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h
index 42388f95ea6d..a51f3dd0e9eb 100644
--- a/fs/ntfs/logfile.h
+++ b/fs/ntfs/logfile.h
@@ -113,7 +113,7 @@ typedef struct { | |||
113 | */ | 113 | */ |
114 | enum { | 114 | enum { |
115 | RESTART_VOLUME_IS_CLEAN = const_cpu_to_le16(0x0002), | 115 | RESTART_VOLUME_IS_CLEAN = const_cpu_to_le16(0x0002), |
116 | RESTART_SPACE_FILLER = 0xffff, /* gcc: Force enum bit width to 16. */ | 116 | RESTART_SPACE_FILLER = const_cpu_to_le16(0xffff), /* gcc: Force enum bit width to 16. */ |
117 | } __attribute__ ((__packed__)); | 117 | } __attribute__ ((__packed__)); |
118 | 118 | ||
119 | typedef le16 RESTART_AREA_FLAGS; | 119 | typedef le16 RESTART_AREA_FLAGS; |
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index 3288bcc2c4aa..590887b943f5 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project. | 2 | * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2004 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
@@ -40,7 +40,7 @@ | |||
40 | * Depending on @gfp_mask the allocation may be guaranteed to succeed. | 40 | * Depending on @gfp_mask the allocation may be guaranteed to succeed. |
41 | */ | 41 | */ |
42 | static inline void *__ntfs_malloc(unsigned long size, | 42 | static inline void *__ntfs_malloc(unsigned long size, |
43 | unsigned int __nocast gfp_mask) | 43 | gfp_t gfp_mask) |
44 | { | 44 | { |
45 | if (likely(size <= PAGE_SIZE)) { | 45 | if (likely(size <= PAGE_SIZE)) { |
46 | BUG_ON(!size); | 46 | BUG_ON(!size); |
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 2c32b84385a8..b011369b5956 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -58,7 +58,8 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) | |||
58 | * overflowing the unsigned long, but I don't think we would ever get | 58 | * overflowing the unsigned long, but I don't think we would ever get |
59 | * here if the volume was that big... | 59 | * here if the volume was that big... |
60 | */ | 60 | */ |
61 | index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; | 61 | index = (u64)ni->mft_no << vol->mft_record_size_bits >> |
62 | PAGE_CACHE_SHIFT; | ||
62 | ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; | 63 | ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; |
63 | 64 | ||
64 | i_size = i_size_read(mft_vi); | 65 | i_size = i_size_read(mft_vi); |
@@ -1953,7 +1954,7 @@ restore_undo_alloc: | |||
1953 | a = ctx->attr; | 1954 | a = ctx->attr; |
1954 | a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1); | 1955 | a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1); |
1955 | undo_alloc: | 1956 | undo_alloc: |
1956 | if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1, TRUE) < 0) { | 1957 | if (ntfs_cluster_free(mft_ni, old_last_vcn, -1) < 0) { |
1957 | ntfs_error(vol->sb, "Failed to free clusters from mft data " | 1958 | ntfs_error(vol->sb, "Failed to free clusters from mft data " |
1958 | "attribute.%s", es); | 1959 | "attribute.%s", es); |
1959 | NVolSetErrors(vol); | 1960 | NVolSetErrors(vol); |
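The mft.c hunk adds a (u64) cast because on 32-bit machines ni->mft_no << mft_record_size_bits is evaluated in a 32-bit unsigned long and can wrap before the right shift computes the page index. A small standalone illustration, using uint32_t to stand in for a 32-bit unsigned long and assumed record/page sizes:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
        uint32_t mft_no = 5000000;          /* large but plausible record number */
        unsigned mft_record_size_bits = 10; /* 1024-byte mft records (assumed)   */
        unsigned page_shift = 12;           /* 4096-byte pages (assumed)         */

        /* 32-bit arithmetic wraps at 2^32 before the right shift... */
        uint32_t wrapped = (uint32_t)(mft_no << mft_record_size_bits) >> page_shift;
        /* ...while widening to 64 bits first gives the real page index. */
        uint64_t correct = (uint64_t)mft_no << mft_record_size_bits >> page_shift;

        printf("32-bit arithmetic: page index %" PRIu32 "\n", wrapped);
        printf("64-bit arithmetic: page index %" PRIu64 "\n", correct);
        return 0;
    }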
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index f5b2ac929081..061b5ff6b73c 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -2,7 +2,7 @@ | |||
2 | * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. | 2 | * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2005 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * Copyright (c) 2002 Richard Russon | 5 | * Copyright (c) 2002-2005 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License as published | 8 | * modify it under the terms of the GNU General Public License as published |
@@ -158,17 +158,21 @@ static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst, | |||
158 | BUG_ON(!dst); | 158 | BUG_ON(!dst); |
159 | BUG_ON(!src); | 159 | BUG_ON(!src); |
160 | 160 | ||
161 | if ((dst->lcn < 0) || (src->lcn < 0)) { /* Are we merging holes? */ | 161 | /* We can merge unmapped regions even if they are misaligned. */ |
162 | if (dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE) | 162 | if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED)) |
163 | return TRUE; | 163 | return TRUE; |
164 | /* If the runs are misaligned, we cannot merge them. */ | ||
165 | if ((dst->vcn + dst->length) != src->vcn) | ||
164 | return FALSE; | 166 | return FALSE; |
165 | } | 167 | /* If both runs are non-sparse and contiguous, we can merge them. */ |
166 | if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */ | 168 | if ((dst->lcn >= 0) && (src->lcn >= 0) && |
167 | return FALSE; | 169 | ((dst->lcn + dst->length) == src->lcn)) |
168 | if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */ | 170 | return TRUE; |
169 | return FALSE; | 171 | /* If we are merging two holes, we can merge them. */ |
170 | 172 | if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE)) | |
171 | return TRUE; | 173 | return TRUE; |
174 | /* Cannot merge. */ | ||
175 | return FALSE; | ||
172 | } | 176 | } |
173 | 177 | ||
174 | /** | 178 | /** |
@@ -214,14 +218,15 @@ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src) | |||
214 | static inline runlist_element *ntfs_rl_append(runlist_element *dst, | 218 | static inline runlist_element *ntfs_rl_append(runlist_element *dst, |
215 | int dsize, runlist_element *src, int ssize, int loc) | 219 | int dsize, runlist_element *src, int ssize, int loc) |
216 | { | 220 | { |
217 | BOOL right; | 221 | BOOL right = FALSE; /* Right end of @src needs merging. */ |
218 | int magic; | 222 | int marker; /* End of the inserted runs. */ |
219 | 223 | ||
220 | BUG_ON(!dst); | 224 | BUG_ON(!dst); |
221 | BUG_ON(!src); | 225 | BUG_ON(!src); |
222 | 226 | ||
223 | /* First, check if the right hand end needs merging. */ | 227 | /* First, check if the right hand end needs merging. */ |
224 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | 228 | if ((loc + 1) < dsize) |
229 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | ||
225 | 230 | ||
226 | /* Space required: @dst size + @src size, less one if we merged. */ | 231 | /* Space required: @dst size + @src size, less one if we merged. */ |
227 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); | 232 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); |
@@ -236,18 +241,19 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst, | |||
236 | if (right) | 241 | if (right) |
237 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); | 242 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); |
238 | 243 | ||
239 | magic = loc + ssize; | 244 | /* First run after the @src runs that have been inserted. */ |
245 | marker = loc + ssize + 1; | ||
240 | 246 | ||
241 | /* Move the tail of @dst out of the way, then copy in @src. */ | 247 | /* Move the tail of @dst out of the way, then copy in @src. */ |
242 | ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right); | 248 | ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right)); |
243 | ntfs_rl_mc(dst, loc + 1, src, 0, ssize); | 249 | ntfs_rl_mc(dst, loc + 1, src, 0, ssize); |
244 | 250 | ||
245 | /* Adjust the size of the preceding hole. */ | 251 | /* Adjust the size of the preceding hole. */ |
246 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; | 252 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; |
247 | 253 | ||
248 | /* We may have changed the length of the file, so fix the end marker */ | 254 | /* We may have changed the length of the file, so fix the end marker */ |
249 | if (dst[magic + 1].lcn == LCN_ENOENT) | 255 | if (dst[marker].lcn == LCN_ENOENT) |
250 | dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length; | 256 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
251 | 257 | ||
252 | return dst; | 258 | return dst; |
253 | } | 259 | } |
@@ -279,18 +285,17 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst, | |||
279 | static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | 285 | static inline runlist_element *ntfs_rl_insert(runlist_element *dst, |
280 | int dsize, runlist_element *src, int ssize, int loc) | 286 | int dsize, runlist_element *src, int ssize, int loc) |
281 | { | 287 | { |
282 | BOOL left = FALSE; | 288 | BOOL left = FALSE; /* Left end of @src needs merging. */ |
283 | BOOL disc = FALSE; /* Discontinuity */ | 289 | BOOL disc = FALSE; /* Discontinuity between @dst and @src. */ |
284 | BOOL hole = FALSE; /* Following a hole */ | 290 | int marker; /* End of the inserted runs. */ |
285 | int magic; | ||
286 | 291 | ||
287 | BUG_ON(!dst); | 292 | BUG_ON(!dst); |
288 | BUG_ON(!src); | 293 | BUG_ON(!src); |
289 | 294 | ||
290 | /* disc => Discontinuity between the end of @dst and the start of @src. | 295 | /* |
291 | * This means we might need to insert a hole. | 296 | * disc => Discontinuity between the end of @dst and the start of @src. |
292 | * hole => @dst ends with a hole or an unmapped region which we can | 297 | * This means we might need to insert a "not mapped" run. |
293 | * extend to match the discontinuity. */ | 298 | */ |
294 | if (loc == 0) | 299 | if (loc == 0) |
295 | disc = (src[0].vcn > 0); | 300 | disc = (src[0].vcn > 0); |
296 | else { | 301 | else { |
@@ -303,58 +308,49 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | |||
303 | merged_length += src->length; | 308 | merged_length += src->length; |
304 | 309 | ||
305 | disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); | 310 | disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); |
306 | if (disc) | ||
307 | hole = (dst[loc - 1].lcn == LCN_HOLE); | ||
308 | } | 311 | } |
309 | 312 | /* | |
310 | /* Space required: @dst size + @src size, less one if we merged, plus | 313 | * Space required: @dst size + @src size, less one if we merged, plus |
311 | * one if there was a discontinuity, less one for a trailing hole. */ | 314 | * one if there was a discontinuity. |
312 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole); | 315 | */ |
316 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc); | ||
313 | if (IS_ERR(dst)) | 317 | if (IS_ERR(dst)) |
314 | return dst; | 318 | return dst; |
315 | /* | 319 | /* |
316 | * We are guaranteed to succeed from here so can start modifying the | 320 | * We are guaranteed to succeed from here so can start modifying the |
317 | * original runlist. | 321 | * original runlist. |
318 | */ | 322 | */ |
319 | |||
320 | if (left) | 323 | if (left) |
321 | __ntfs_rl_merge(dst + loc - 1, src); | 324 | __ntfs_rl_merge(dst + loc - 1, src); |
322 | 325 | /* | |
323 | magic = loc + ssize - left + disc - hole; | 326 | * First run after the @src runs that have been inserted. |
327 | * Nominally, @marker equals @loc + @ssize, i.e. location + number of | ||
328 | * runs in @src. However, if @left, then the first run in @src has | ||
329 | * been merged with one in @dst. And if @disc, then @dst and @src do | ||
330 | * not meet and we need an extra run to fill the gap. | ||
331 | */ | ||
332 | marker = loc + ssize - left + disc; | ||
324 | 333 | ||
325 | /* Move the tail of @dst out of the way, then copy in @src. */ | 334 | /* Move the tail of @dst out of the way, then copy in @src. */ |
326 | ntfs_rl_mm(dst, magic, loc, dsize - loc); | 335 | ntfs_rl_mm(dst, marker, loc, dsize - loc); |
327 | ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left); | 336 | ntfs_rl_mc(dst, loc + disc, src, left, ssize - left); |
328 | 337 | ||
329 | /* Adjust the VCN of the last run ... */ | 338 | /* Adjust the VCN of the first run after the insertion... */ |
330 | if (dst[magic].lcn <= LCN_HOLE) | 339 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
331 | dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; | ||
332 | /* ... and the length. */ | 340 | /* ... and the length. */ |
333 | if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED) | 341 | if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED) |
334 | dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn; | 342 | dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn; |
335 | 343 | ||
336 | /* Writing beyond the end of the file and there's a discontinuity. */ | 344 | /* Writing beyond the end of the file and there is a discontinuity. */ |
337 | if (disc) { | 345 | if (disc) { |
338 | if (hole) | 346 | if (loc > 0) { |
339 | dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn; | 347 | dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length; |
340 | else { | 348 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; |
341 | if (loc > 0) { | 349 | } else { |
342 | dst[loc].vcn = dst[loc - 1].vcn + | 350 | dst[loc].vcn = 0; |
343 | dst[loc - 1].length; | 351 | dst[loc].length = dst[loc + 1].vcn; |
344 | dst[loc].length = dst[loc + 1].vcn - | ||
345 | dst[loc].vcn; | ||
346 | } else { | ||
347 | dst[loc].vcn = 0; | ||
348 | dst[loc].length = dst[loc + 1].vcn; | ||
349 | } | ||
350 | dst[loc].lcn = LCN_RL_NOT_MAPPED; | ||
351 | } | 352 | } |
352 | 353 | dst[loc].lcn = LCN_RL_NOT_MAPPED; | |
353 | magic += hole; | ||
354 | |||
355 | if (dst[magic].lcn == LCN_ENOENT) | ||
356 | dst[magic].vcn = dst[magic - 1].vcn + | ||
357 | dst[magic - 1].length; | ||
358 | } | 354 | } |
359 | return dst; | 355 | return dst; |
360 | } | 356 | } |
@@ -385,20 +381,23 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | |||
385 | static inline runlist_element *ntfs_rl_replace(runlist_element *dst, | 381 | static inline runlist_element *ntfs_rl_replace(runlist_element *dst, |
386 | int dsize, runlist_element *src, int ssize, int loc) | 382 | int dsize, runlist_element *src, int ssize, int loc) |
387 | { | 383 | { |
388 | BOOL left = FALSE; | 384 | BOOL left = FALSE; /* Left end of @src needs merging. */ |
389 | BOOL right; | 385 | BOOL right = FALSE; /* Right end of @src needs merging. */ |
390 | int magic; | 386 | int tail; /* Start of tail of @dst. */ |
387 | int marker; /* End of the inserted runs. */ | ||
391 | 388 | ||
392 | BUG_ON(!dst); | 389 | BUG_ON(!dst); |
393 | BUG_ON(!src); | 390 | BUG_ON(!src); |
394 | 391 | ||
395 | /* First, merge the left and right ends, if necessary. */ | 392 | /* First, see if the left and right ends need merging. */ |
396 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | 393 | if ((loc + 1) < dsize) |
394 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | ||
397 | if (loc > 0) | 395 | if (loc > 0) |
398 | left = ntfs_are_rl_mergeable(dst + loc - 1, src); | 396 | left = ntfs_are_rl_mergeable(dst + loc - 1, src); |
399 | 397 | /* | |
400 | /* Allocate some space. We'll need less if the left, right, or both | 398 | * Allocate some space. We will need less if the left, right, or both |
401 | * ends were merged. */ | 399 | * ends get merged. |
400 | */ | ||
402 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); | 401 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); |
403 | if (IS_ERR(dst)) | 402 | if (IS_ERR(dst)) |
404 | return dst; | 403 | return dst; |
@@ -406,21 +405,37 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst, | |||
406 | * We are guaranteed to succeed from here so can start modifying the | 405 | * We are guaranteed to succeed from here so can start modifying the |
407 | * original runlists. | 406 | * original runlists. |
408 | */ | 407 | */ |
408 | |||
409 | /* First, merge the left and right ends, if necessary. */ | ||
409 | if (right) | 410 | if (right) |
410 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); | 411 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); |
411 | if (left) | 412 | if (left) |
412 | __ntfs_rl_merge(dst + loc - 1, src); | 413 | __ntfs_rl_merge(dst + loc - 1, src); |
413 | 414 | /* | |
414 | /* FIXME: What does this mean? (AIA) */ | 415 | * Offset of the tail of @dst. This needs to be moved out of the way |
415 | magic = loc + ssize - left; | 416 | * to make space for the runs to be copied from @src, i.e. the first |
417 | * run of the tail of @dst. | ||
418 | * Nominally, @tail equals @loc + 1, i.e. location, skipping the | ||
419 | * replaced run. However, if @right, then one of @dst's runs is | ||
420 | * already merged into @src. | ||
421 | */ | ||
422 | tail = loc + right + 1; | ||
423 | /* | ||
424 | * First run after the @src runs that have been inserted, i.e. where | ||
425 | * the tail of @dst needs to be moved to. | ||
426 | * Nominally, @marker equals @loc + @ssize, i.e. location + number of | ||
427 | * runs in @src. However, if @left, then the first run in @src has | ||
428 | * been merged with one in @dst. | ||
429 | */ | ||
430 | marker = loc + ssize - left; | ||
416 | 431 | ||
417 | /* Move the tail of @dst out of the way, then copy in @src. */ | 432 | /* Move the tail of @dst out of the way, then copy in @src. */ |
418 | ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1); | 433 | ntfs_rl_mm(dst, marker, tail, dsize - tail); |
419 | ntfs_rl_mc(dst, loc, src, left, ssize - left); | 434 | ntfs_rl_mc(dst, loc, src, left, ssize - left); |
420 | 435 | ||
421 | /* We may have changed the length of the file, so fix the end marker */ | 436 | /* We may have changed the length of the file, so fix the end marker. */ |
422 | if (dst[magic].lcn == LCN_ENOENT) | 437 | if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT) |
423 | dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; | 438 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
424 | return dst; | 439 | return dst; |
425 | } | 440 | } |
426 | 441 | ||
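The rewritten ntfs_are_rl_mergeable() above spells out the merge rules: two unmapped runs merge even when misaligned, everything else must be adjacent in VCN space, and then either two real runs that are also contiguous on disk or two holes may merge. A standalone sketch of that predicate follows; the struct is simplified and the special LCN values are illustrative stand-ins for the constants the kernel defines in runlist.h.

    #include <stdio.h>

    typedef long long VCN;
    typedef long long LCN;

    enum { LCN_HOLE = -1, LCN_RL_NOT_MAPPED = -2 };   /* assumed values */

    struct rl_elem { VCN vcn; LCN lcn; long long length; };

    static int rl_mergeable(const struct rl_elem *dst, const struct rl_elem *src)
    {
        /* Unmapped regions merge even if they are misaligned. */
        if (dst->lcn == LCN_RL_NOT_MAPPED && src->lcn == LCN_RL_NOT_MAPPED)
            return 1;
        /* Otherwise the runs must be adjacent in VCN space. */
        if (dst->vcn + dst->length != src->vcn)
            return 0;
        /* Two real runs merge only if they are also contiguous on disk. */
        if (dst->lcn >= 0 && src->lcn >= 0 && dst->lcn + dst->length == src->lcn)
            return 1;
        /* Two holes merge. */
        if (dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE)
            return 1;
        return 0;
    }

    int main(void)
    {
        struct rl_elem a = { 0, 100, 4 }, b = { 4, 104, 2 };   /* contiguous runs */
        struct rl_elem c = { 4, 200, 2 };                      /* adjacent, not contiguous */
        printf("a+b mergeable: %d, a+c mergeable: %d\n",
               rl_mergeable(&a, &b), rl_mergeable(&a, &c));
        return 0;
    }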
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index a389a5a16c84..0ea887fc859c 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. | 2 | * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2004 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |