author		Jeff Garzik <jgarzik@pobox.com>	2005-09-14 08:19:08 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-14 08:19:08 -0400
commit		905ec87e93bc9e01b15c60035cd6a50c636cbaef (patch)
tree		46fd7618d6511611ffc19eb0dd4d7bc6b90a41c2 /fs/ntfs/aops.c
parent		1d6ae775d7a948c9575658eb41184fd2e506c0df (diff)
parent		2f4ba45a75d6383b4a1201169a808ffea416ffa0 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'fs/ntfs/aops.c')
-rw-r--r--	fs/ntfs/aops.c	294
1 file changed, 170 insertions, 124 deletions
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 78adad7a988d..b6cc8cf24626 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/bit_spinlock.h>
 
 #include "aops.h"
 #include "attrib.h"
@@ -55,9 +56,8 @@
  */
 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-	static DEFINE_SPINLOCK(page_uptodate_lock);
 	unsigned long flags;
-	struct buffer_head *tmp;
+	struct buffer_head *first, *tmp;
 	struct page *page;
 	ntfs_inode *ni;
 	int page_uptodate = 1;
@@ -89,11 +89,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 	} else {
 		clear_buffer_uptodate(bh);
+		SetPageError(page);
 		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
 				(unsigned long long)bh->b_blocknr);
-		SetPageError(page);
 	}
-	spin_lock_irqsave(&page_uptodate_lock, flags);
+	first = page_buffers(page);
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@@ -108,7 +110,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	/*
 	 * If none of the buffers had errors then we can set the page uptodate,
 	 * but we first have to perform the post read mst fixups, if the
@@ -141,7 +144,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	unlock_page(page);
 	return;
 still_busy:
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	return;
 }
 
@@ -185,13 +189,15 @@ static int ntfs_read_block(struct page *page)
 	blocksize_bits = VFS_I(ni)->i_blkbits;
 	blocksize = 1 << blocksize_bits;
 
-	if (!page_has_buffers(page))
+	if (!page_has_buffers(page)) {
 		create_empty_buffers(page, blocksize, 0);
-	bh = head = page_buffers(page);
-	if (unlikely(!bh)) {
-		unlock_page(page);
-		return -ENOMEM;
+		if (unlikely(!page_has_buffers(page))) {
+			unlock_page(page);
+			return -ENOMEM;
+		}
 	}
+	bh = head = page_buffers(page);
+	BUG_ON(!bh);
 
 	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 	read_lock_irqsave(&ni->size_lock, flags);
@@ -204,6 +210,7 @@ static int ntfs_read_block(struct page *page)
 	nr = i = 0;
 	do {
 		u8 *kaddr;
+		int err;
 
 		if (unlikely(buffer_uptodate(bh)))
 			continue;
@@ -211,6 +218,7 @@ static int ntfs_read_block(struct page *page)
 			arr[nr++] = bh;
 			continue;
 		}
+		err = 0;
 		bh->b_bdev = vol->sb->s_bdev;
 		/* Is the block within the allowed limits? */
 		if (iblock < lblock) {
@@ -252,7 +260,6 @@ lock_retry_remap:
 				goto handle_hole;
 			/* If first try and runlist unmapped, map and retry. */
 			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-				int err;
 				is_retry = TRUE;
 				/*
 				 * Attempt to map runlist, dropping lock for
@@ -263,20 +270,30 @@ lock_retry_remap:
 				if (likely(!err))
 					goto lock_retry_remap;
 				rl = NULL;
-				lcn = err;
 			} else if (!rl)
 				up_read(&ni->runlist.lock);
+			/*
+			 * If buffer is outside the runlist, treat it as a
+			 * hole. This can happen due to concurrent truncate
+			 * for example.
+			 */
+			if (err == -ENOENT || lcn == LCN_ENOENT) {
+				err = 0;
+				goto handle_hole;
+			}
 			/* Hard error, zero out region. */
+			if (!err)
+				err = -EIO;
 			bh->b_blocknr = -1;
 			SetPageError(page);
 			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
 					"attribute type 0x%x, vcn 0x%llx, "
 					"offset 0x%x because its location on "
 					"disk could not be determined%s "
-					"(error code %lli).", ni->mft_no,
+					"(error code %i).", ni->mft_no,
 					ni->type, (unsigned long long)vcn,
 					vcn_ofs, is_retry ? " even after "
-					"retrying" : "", (long long)lcn);
+					"retrying" : "", err);
 		}
 		/*
 		 * Either iblock was outside lblock limits or
@@ -289,9 +306,10 @@ handle_hole:
 handle_zblock:
 		kaddr = kmap_atomic(page, KM_USER0);
 		memset(kaddr + i * blocksize, 0, blocksize);
-		flush_dcache_page(page);
 		kunmap_atomic(kaddr, KM_USER0);
-		set_buffer_uptodate(bh);
+		flush_dcache_page(page);
+		if (likely(!err))
+			set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	/* Release the lock if we took it. */
@@ -367,31 +385,38 @@ retry_readpage:
 		return 0;
 	}
 	ni = NTFS_I(page->mapping->host);
-
+	/*
+	 * Only $DATA attributes can be encrypted and only unnamed $DATA
+	 * attributes can be compressed. Index root can have the flags set but
+	 * this means to create compressed/encrypted files, not that the
+	 * attribute is compressed/encrypted.
+	 */
+	if (ni->type != AT_INDEX_ROOT) {
+		/* If attribute is encrypted, deny access, just like NT4. */
+		if (NInoEncrypted(ni)) {
+			BUG_ON(ni->type != AT_DATA);
+			err = -EACCES;
+			goto err_out;
+		}
+		/* Compressed data streams are handled in compress.c. */
+		if (NInoNonResident(ni) && NInoCompressed(ni)) {
+			BUG_ON(ni->type != AT_DATA);
+			BUG_ON(ni->name_len);
+			return ntfs_read_compressed_block(page);
+		}
+	}
 	/* NInoNonResident() == NInoIndexAllocPresent() */
 	if (NInoNonResident(ni)) {
-		/*
-		 * Only unnamed $DATA attributes can be compressed or
-		 * encrypted.
-		 */
-		if (ni->type == AT_DATA && !ni->name_len) {
-			/* If file is encrypted, deny access, just like NT4. */
-			if (NInoEncrypted(ni)) {
-				err = -EACCES;
-				goto err_out;
-			}
-			/* Compressed data streams are handled in compress.c. */
-			if (NInoCompressed(ni))
-				return ntfs_read_compressed_block(page);
-		}
-		/* Normal data stream. */
+		/* Normal, non-resident data stream. */
 		return ntfs_read_block(page);
 	}
 	/*
 	 * Attribute is resident, implying it is not compressed or encrypted.
 	 * This also means the attribute is smaller than an mft record and
 	 * hence smaller than a page, so can simply zero out any pages with
-	 * index above 0.
+	 * index above 0. Note the attribute can actually be marked compressed
+	 * but if it is resident the actual data is not compressed so we are
+	 * ok to ignore the compressed flag here.
 	 */
 	if (unlikely(page->index > 0)) {
 		kaddr = kmap_atomic(page, KM_USER0);
@@ -511,19 +536,21 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 		BUG_ON(!PageUptodate(page));
 		create_empty_buffers(page, blocksize,
 				(1 << BH_Uptodate) | (1 << BH_Dirty));
+		if (unlikely(!page_has_buffers(page))) {
+			ntfs_warning(vol->sb, "Error allocating page "
+					"buffers. Redirtying page so we try "
+					"again later.");
+			/*
+			 * Put the page back on mapping->dirty_pages, but leave
+			 * its buffers' dirty state as-is.
+			 */
+			redirty_page_for_writepage(wbc, page);
+			unlock_page(page);
+			return 0;
+		}
 	}
 	bh = head = page_buffers(page);
-	if (unlikely(!bh)) {
-		ntfs_warning(vol->sb, "Error allocating page buffers. "
-				"Redirtying page so we try again later.");
-		/*
-		 * Put the page back on mapping->dirty_pages, but leave its
-		 * buffer's dirty state as-is.
-		 */
-		redirty_page_for_writepage(wbc, page);
-		unlock_page(page);
-		return 0;
-	}
+	BUG_ON(!bh);
 
 	/* NOTE: Different naming scheme to ntfs_read_block()! */
 
@@ -670,6 +697,27 @@ lock_retry_remap:
 		}
 		/* It is a hole, need to instantiate it. */
 		if (lcn == LCN_HOLE) {
+			u8 *kaddr;
+			unsigned long *bpos, *bend;
+
+			/* Check if the buffer is zero. */
+			kaddr = kmap_atomic(page, KM_USER0);
+			bpos = (unsigned long *)(kaddr + bh_offset(bh));
+			bend = (unsigned long *)((u8*)bpos + blocksize);
+			do {
+				if (unlikely(*bpos))
+					break;
+			} while (likely(++bpos < bend));
+			kunmap_atomic(kaddr, KM_USER0);
+			if (bpos == bend) {
+				/*
+				 * Buffer is zero and sparse, no need to write
+				 * it.
+				 */
+				bh->b_blocknr = -1;
+				clear_buffer_dirty(bh);
+				continue;
+			}
 			// TODO: Instantiate the hole.
 			// clear_buffer_new(bh);
 			// unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
@@ -690,20 +738,37 @@ lock_retry_remap:
 			if (likely(!err))
 				goto lock_retry_remap;
 			rl = NULL;
-			lcn = err;
 		} else if (!rl)
 			up_read(&ni->runlist.lock);
+		/*
+		 * If buffer is outside the runlist, truncate has cut it out
+		 * of the runlist. Just clean and clear the buffer and set it
+		 * uptodate so it can get discarded by the VM.
+		 */
+		if (err == -ENOENT || lcn == LCN_ENOENT) {
+			u8 *kaddr;
+
+			bh->b_blocknr = -1;
+			clear_buffer_dirty(bh);
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + bh_offset(bh), 0, blocksize);
+			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(page);
+			set_buffer_uptodate(bh);
+			err = 0;
+			continue;
+		}
 		/* Failed to map the buffer, even after retrying. */
+		if (!err)
+			err = -EIO;
 		bh->b_blocknr = -1;
 		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
 				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
 				"because its location on disk could not be "
-				"determined%s (error code %lli).", ni->mft_no,
+				"determined%s (error code %i).", ni->mft_no,
 				ni->type, (unsigned long long)vcn,
 				vcn_ofs, is_retry ? " even after "
-				"retrying" : "", (long long)lcn);
-		if (!err)
-			err = -EIO;
+				"retrying" : "", err);
 		break;
 	} while (block++, (bh = bh->b_this_page) != head);
 
@@ -714,7 +779,7 @@ lock_retry_remap:
 	/* For the error case, need to reset bh to the beginning. */
 	bh = head;
 
-	/* Just an optimization, so ->readpage() isn't called later. */
+	/* Just an optimization, so ->readpage() is not called later. */
 	if (unlikely(!PageUptodate(page))) {
 		int uptodate = 1;
 		do {
@@ -730,7 +795,6 @@ lock_retry_remap:
 
 	/* Setup all mapped, dirty buffers for async write i/o. */
 	do {
-		get_bh(bh);
 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			if (test_clear_buffer_dirty(bh)) {
@@ -768,14 +832,8 @@ lock_retry_remap:
 
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */
-	unlock_page(page);
 
-	/*
-	 * Submit the prepared buffers for i/o. Note the page is unlocked,
-	 * and the async write i/o completion handler can end_page_writeback()
-	 * at any time after the *first* submit_bh(). So the buffers can then
-	 * disappear...
-	 */
+	/* Submit the prepared buffers for i/o. */
 	need_end_writeback = TRUE;
 	do {
 		struct buffer_head *next = bh->b_this_page;
@@ -783,9 +841,9 @@ lock_retry_remap:
 			submit_bh(WRITE, bh);
 			need_end_writeback = FALSE;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
+	unlock_page(page);
 
 	/* If no i/o was started, need to end_page_writeback(). */
 	if (unlikely(need_end_writeback))
@@ -860,7 +918,6 @@ static int ntfs_write_mst_block(struct page *page,
 	sync = (wbc->sync_mode == WB_SYNC_ALL);
 
 	/* Make sure we have mapped buffers. */
-	BUG_ON(!page_has_buffers(page));
 	bh = head = page_buffers(page);
 	BUG_ON(!bh);
 
@@ -1280,38 +1337,42 @@ retry_writepage:
 		ntfs_debug("Write outside i_size - truncated?");
 		return 0;
 	}
+	/*
+	 * Only $DATA attributes can be encrypted and only unnamed $DATA
+	 * attributes can be compressed. Index root can have the flags set but
+	 * this means to create compressed/encrypted files, not that the
+	 * attribute is compressed/encrypted.
+	 */
+	if (ni->type != AT_INDEX_ROOT) {
+		/* If file is encrypted, deny access, just like NT4. */
+		if (NInoEncrypted(ni)) {
+			unlock_page(page);
+			BUG_ON(ni->type != AT_DATA);
+			ntfs_debug("Denying write access to encrypted "
+					"file.");
+			return -EACCES;
+		}
+		/* Compressed data streams are handled in compress.c. */
+		if (NInoNonResident(ni) && NInoCompressed(ni)) {
+			BUG_ON(ni->type != AT_DATA);
+			BUG_ON(ni->name_len);
+			// TODO: Implement and replace this with
+			// return ntfs_write_compressed_block(page);
+			unlock_page(page);
+			ntfs_error(vi->i_sb, "Writing to compressed files is "
+					"not supported yet. Sorry.");
+			return -EOPNOTSUPP;
+		}
+		// TODO: Implement and remove this check.
+		if (NInoNonResident(ni) && NInoSparse(ni)) {
+			unlock_page(page);
+			ntfs_error(vi->i_sb, "Writing to sparse files is not "
+					"supported yet. Sorry.");
+			return -EOPNOTSUPP;
+		}
+	}
 	/* NInoNonResident() == NInoIndexAllocPresent() */
 	if (NInoNonResident(ni)) {
-		/*
-		 * Only unnamed $DATA attributes can be compressed, encrypted,
-		 * and/or sparse.
-		 */
-		if (ni->type == AT_DATA && !ni->name_len) {
-			/* If file is encrypted, deny access, just like NT4. */
-			if (NInoEncrypted(ni)) {
-				unlock_page(page);
-				ntfs_debug("Denying write access to encrypted "
-						"file.");
-				return -EACCES;
-			}
-			/* Compressed data streams are handled in compress.c. */
-			if (NInoCompressed(ni)) {
-				// TODO: Implement and replace this check with
-				// return ntfs_write_compressed_block(page);
-				unlock_page(page);
-				ntfs_error(vi->i_sb, "Writing to compressed "
-						"files is not supported yet. "
-						"Sorry.");
-				return -EOPNOTSUPP;
-			}
-			// TODO: Implement and remove this check.
-			if (NInoSparse(ni)) {
-				unlock_page(page);
-				ntfs_error(vi->i_sb, "Writing to sparse files "
-						"is not supported yet. Sorry.");
-				return -EOPNOTSUPP;
-			}
-		}
 		/* We have to zero every time due to mmap-at-end-of-file. */
 		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
 			/* The page straddles i_size. */
@@ -1324,14 +1385,16 @@ retry_writepage:
 		/* Handle mst protected attributes. */
 		if (NInoMstProtected(ni))
 			return ntfs_write_mst_block(page, wbc);
-		/* Normal data stream. */
+		/* Normal, non-resident data stream. */
 		return ntfs_write_block(page, wbc);
 	}
 	/*
-	 * Attribute is resident, implying it is not compressed, encrypted,
-	 * sparse, or mst protected. This also means the attribute is smaller
-	 * than an mft record and hence smaller than a page, so can simply
-	 * return error on any pages with index above 0.
+	 * Attribute is resident, implying it is not compressed, encrypted, or
+	 * mst protected. This also means the attribute is smaller than an mft
+	 * record and hence smaller than a page, so can simply return error on
+	 * any pages with index above 0. Note the attribute can actually be
+	 * marked compressed but if it is resident the actual data is not
+	 * compressed so we are ok to ignore the compressed flag here.
 	 */
 	BUG_ON(page_has_buffers(page));
 	BUG_ON(!PageUptodate(page));
@@ -1380,30 +1443,14 @@ retry_writepage:
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 	unlock_page(page);
-
 	/*
-	 * Here, we don't need to zero the out of bounds area everytime because
-	 * the below memcpy() already takes care of the mmap-at-end-of-file
-	 * requirements. If the file is converted to a non-resident one, then
-	 * the code path use is switched to the non-resident one where the
-	 * zeroing happens on each ntfs_writepage() invocation.
-	 *
-	 * The above also applies nicely when i_size is decreased.
-	 *
-	 * When i_size is increased, the memory between the old and new i_size
-	 * _must_ be zeroed (or overwritten with new data). Otherwise we will
-	 * expose data to userspace/disk which should never have been exposed.
-	 *
-	 * FIXME: Ensure that i_size increases do the zeroing/overwriting and
-	 * if we cannot guarantee that, then enable the zeroing below. If the
-	 * zeroing below is enabled, we MUST move the unlock_page() from above
-	 * to after the kunmap_atomic(), i.e. just before the
-	 * end_page_writeback().
-	 * UPDATE: ntfs_prepare/commit_write() do the zeroing on i_size
-	 * increases for resident attributes so those are ok.
-	 * TODO: ntfs_truncate(), others?
+	 * Here, we do not need to zero the out of bounds area everytime
+	 * because the below memcpy() already takes care of the
+	 * mmap-at-end-of-file requirements. If the file is converted to a
+	 * non-resident one, then the code path use is switched to the
+	 * non-resident one where the zeroing happens on each ntfs_writepage()
+	 * invocation.
 	 */
-
 	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
 	i_size = i_size_read(vi);
 	if (unlikely(attr_len > i_size)) {
@@ -1681,27 +1728,25 @@ lock_retry_remap:
 				if (likely(!err))
 					goto lock_retry_remap;
 				rl = NULL;
-				lcn = err;
 			} else if (!rl)
 				up_read(&ni->runlist.lock);
 			/*
 			 * Failed to map the buffer, even after
 			 * retrying.
 			 */
+			if (!err)
+				err = -EIO;
 			bh->b_blocknr = -1;
 			ntfs_error(vol->sb, "Failed to write to inode "
 					"0x%lx, attribute type 0x%x, "
 					"vcn 0x%llx, offset 0x%x "
 					"because its location on disk "
 					"could not be determined%s "
-					"(error code %lli).",
+					"(error code %i).",
 					ni->mft_no, ni->type,
 					(unsigned long long)vcn,
 					vcn_ofs, is_retry ? " even "
-					"after retrying" : "",
-					(long long)lcn);
-			if (!err)
-				err = -EIO;
+					"after retrying" : "", err);
 			goto err_out;
 		}
 		/* We now have a successful remap, i.e. lcn >= 0. */
@@ -2357,6 +2402,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 		buffers_to_free = bh;
 	}
 	bh = head = page_buffers(page);
+	BUG_ON(!bh);
 	do {
 		bh_ofs = bh_offset(bh);
 		if (bh_ofs + bh_size <= ofs)
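
The core locking change in this patch replaces the function-static page_uptodate_lock spinlock in ntfs_end_buffer_async_read() with local_irq_save() plus bit_spin_lock(BH_Uptodate_Lock, &first->b_state), i.e. one bit of the first buffer head's existing state word doubles as the lock, so the lock lives in per-page state instead of being shared by every page on the system. The following is a rough, self-contained user-space sketch of that test-and-set bit-lock idea, not kernel code: the names UPTODATE_LOCK_BIT, state, counter and worker are hypothetical stand-ins, and GCC/Clang atomic builtins stand in for the kernel's bit_spin_lock()/bit_spin_unlock().

/*
 * Illustration only: a bit of an existing word used as a spinlock.
 * Build with: gcc -O2 -pthread bitlock_demo.c
 */
#include <stdio.h>
#include <pthread.h>

#define UPTODATE_LOCK_BIT 0		/* stand-in for BH_Uptodate_Lock */

static unsigned long state;		/* stand-in for first->b_state */
static int counter;			/* data protected by the bit lock */

static void bit_lock(unsigned long *word, int bit)
{
	/*
	 * Atomically set the bit; if it was already set, another thread
	 * holds the lock, so spin until we are the one who set it.
	 */
	while (__atomic_fetch_or(word, 1UL << bit, __ATOMIC_ACQUIRE) &
			(1UL << bit))
		;	/* busy-wait */
}

static void bit_unlock(unsigned long *word, int bit)
{
	/* Clear the bit, releasing the lock. */
	__atomic_fetch_and(word, ~(1UL << bit), __ATOMIC_RELEASE);
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		bit_lock(&state, UPTODATE_LOCK_BIT);
		counter++;	/* critical section */
		bit_unlock(&state, UPTODATE_LOCK_BIT);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("counter = %d (expected 200000)\n", counter);
	return 0;
}

Because the bit lock is embedded in the page's own buffer-head state, read completions for different pages no longer contend on one global spinlock, and no separate lock object needs to be allocated; the added #include <linux/bit_spinlock.h> at the top of the patch pulls in the helper that implements this pattern in the kernel.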